2024-12-11 22:55:59,267 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-11 22:55:59,296 main DEBUG Took 0.025264 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 22:55:59,297 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 22:55:59,298 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 22:55:59,300 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 22:55:59,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,319 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 22:55:59,340 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,342 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,343 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,343 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,344 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,345 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,346 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,346 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,348 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,348 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,350 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,350 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,351 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,351 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
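The entries above show Log4j Core building one LoggerConfig per logger declared in the test's log4j2.properties (loaded from the hbase-logging tests jar). As a quick way to see the same information from inside a test JVM, here is a minimal sketch using the public Log4j Core API; it assumes only that log4j-core 2.x is the active logging implementation and is not part of the HBase test harness itself.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

public class DumpLoggerConfigs {
    public static void main(String[] args) {
        // Obtain the core LoggerContext backing the current configuration.
        // The cast is only valid when log4j-core is the active implementation.
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        Configuration cfg = ctx.getConfiguration();

        // Print every configured logger and its level, mirroring the
        // LoggerConfig$Builder lines emitted during initialization above.
        for (LoggerConfig lc : cfg.getLoggers().values()) {
            String name = lc.getName().isEmpty() ? "<root>" : lc.getName();
            System.out.println(name + " -> " + lc.getLevel());
        }
    }
}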
2024-12-11 22:55:59,352 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,352 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,353 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,353 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,354 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,355 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,355 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,356 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,356 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,357 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 22:55:59,357 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,358 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 22:55:59,363 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 22:55:59,367 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 22:55:59,371 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 22:55:59,380 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
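The root logger above is wired to a single "Console" appender, and the next entries show the PatternLayout it is built with ("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"). Purely to illustrate what that builder call produces, here is a hedged sketch that constructs the same layout programmatically with the Log4j Core API; the test itself plugs this layout into its custom HBaseTestAppender rather than a stock console appender.

import org.apache.logging.log4j.core.layout.PatternLayout;

public class BuildTestLayout {
    public static void main(String[] args) {
        // Sketch only: the same pattern the test configuration declares --
        // ISO8601 timestamp, padded level, thread + MDC, caller class/line, message.
        PatternLayout layout = PatternLayout.newBuilder()
            .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
            .build();
        System.out.println("Configured pattern: " + layout.getConversionPattern());
    }
}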
2024-12-11 22:55:59,392 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 22:55:59,392 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 22:55:59,410 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 22:55:59,414 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 22:55:59,417 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 22:55:59,417 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 22:55:59,418 main DEBUG createAppenders(={Console}) 2024-12-11 22:55:59,419 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-11 22:55:59,420 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-11 22:55:59,420 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-11 22:55:59,421 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 22:55:59,422 main DEBUG OutputStream closed 2024-12-11 22:55:59,422 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 22:55:59,423 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 22:55:59,423 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-11 22:55:59,602 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 22:55:59,605 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 22:55:59,616 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 22:55:59,619 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 22:55:59,620 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 22:55:59,629 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 22:55:59,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 22:55:59,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 22:55:59,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 22:55:59,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 22:55:59,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 22:55:59,647 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 22:55:59,648 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 22:55:59,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 22:55:59,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 22:55:59,650 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 22:55:59,650 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 22:55:59,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 22:55:59,657 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 22:55:59,658 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@1608bcbd) with optional ClassLoader: null 2024-12-11 22:55:59,658 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 22:55:59,678 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@1608bcbd] started OK. 2024-12-11T22:55:59,705 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-11 22:55:59,712 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 22:55:59,712 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11T22:56:00,629 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b 2024-12-11T22:56:00,630 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-11T22:56:00,630 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-11T22:56:00,734 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-11T22:56:01,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T22:56:01,203 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380, deleteOnExit=true 2024-12-11T22:56:01,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T22:56:01,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/test.cache.data in system properties and HBase conf 2024-12-11T22:56:01,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T22:56:01,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir in system properties and HBase conf 2024-12-11T22:56:01,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T22:56:01,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T22:56:01,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T22:56:01,354 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T22:56:01,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:56:01,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:56:01,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T22:56:01,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:56:01,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T22:56:01,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T22:56:01,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:56:01,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:56:01,377 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T22:56:01,377 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/nfs.dump.dir in system properties and HBase conf 2024-12-11T22:56:01,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir in system properties and HBase conf 2024-12-11T22:56:01,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:56:01,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T22:56:01,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T22:56:02,913 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T22:56:03,135 INFO [Time-limited test {}] log.Log(170): Logging initialized @5375ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T22:56:03,393 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:03,526 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:03,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:03,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:03,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T22:56:03,644 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:03,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41878587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:03,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20e934eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:05,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44c957a2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-44931-hadoop-hdfs-3_4_1-tests_jar-_-any-12565937432012017202/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T22:56:05,030 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931} 2024-12-11T22:56:05,030 INFO [Time-limited test {}] server.Server(415): Started @7272ms 2024-12-11T22:56:06,239 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:06,247 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:06,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:06,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:06,251 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:56:06,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@394d89b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:06,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a35a11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:06,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b3e82e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-33665-hadoop-hdfs-3_4_1-tests_jar-_-any-13099563806845922636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:56:06,364 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665} 2024-12-11T22:56:06,364 INFO [Time-limited test {}] server.Server(415): Started @8606ms 2024-12-11T22:56:06,425 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T22:56:06,585 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:06,594 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:06,615 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:06,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:06,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T22:56:06,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f30d4b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:06,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca589bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:06,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@213fabce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-44567-hadoop-hdfs-3_4_1-tests_jar-_-any-18097709843509235606/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:56:06,760 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567} 2024-12-11T22:56:06,761 INFO [Time-limited test {}] server.Server(415): Started @9002ms 2024-12-11T22:56:06,767 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T22:56:06,833 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:06,839 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:06,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:06,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:06,844 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T22:56:06,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@693ee7fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:06,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bc523e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:06,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68703158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-41761-hadoop-hdfs-3_4_1-tests_jar-_-any-6472939253967046220/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:56:06,959 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761} 2024-12-11T22:56:06,960 INFO [Time-limited test {}] server.Server(415): Started @9201ms 2024-12-11T22:56:06,962 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
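Everything from the "STARTING DFS" entry down to this point is HBaseTestingUtil bringing up the HDFS side of the mini cluster: one namenode and three datanodes, each with its own Jetty web UI. For orientation only, below is a hedged sketch of how a test typically requests a cluster of this shape. The builder calls (StartMiniClusterOption.builder(), numMasters(), numRegionServers(), numDataNodes(), numZkServers()) are inferred from the option fields printed in the log and from older HBase testing APIs, so treat them as assumptions rather than the exact calls TestExportSnapshot makes; the API differs across HBase versions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
        // numDataNodes=3, numZkServers=1} from the log above.
        // Builder method names are assumed, not confirmed by the log.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);   // starts ZK, HDFS and HBase as in the log
        try {
            // ... run test logic against the mini cluster ...
        } finally {
            util.shutdownMiniCluster(); // tears everything down again
        }
    }
}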
2024-12-11T22:56:08,279 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,279 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,293 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,296 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,361 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T22:56:08,361 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T22:56:08,447 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,450 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347/current, will proceed with Du for space computation calculation, 2024-12-11T22:56:08,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x628513354ac41493 with lease ID 0x95849f012978cc4f: Processing first storage report for DS-0c049be1-bad2-41a4-b16f-833e5e25857a from datanode DatanodeRegistration(127.0.0.1:39843, datanodeUuid=d14f1d73-c28d-4c4a-b4f5-a79345f45ddf, infoPort=42177, infoSecurePort=0, ipcPort=35637, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x628513354ac41493 with lease ID 0x95849f012978cc4f: from storage DS-0c049be1-bad2-41a4-b16f-833e5e25857a node DatanodeRegistration(127.0.0.1:39843, datanodeUuid=d14f1d73-c28d-4c4a-b4f5-a79345f45ddf, infoPort=42177, infoSecurePort=0, ipcPort=35637, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,483 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1a0d88699c3d3b8 with lease ID 0x95849f012978cc50: Processing first storage report for DS-03ef22e9-098d-4974-baec-426a6b14708f from datanode DatanodeRegistration(127.0.0.1:41607, datanodeUuid=5de375d8-4cc1-4ec3-8d8a-5a2117eca9f7, infoPort=33915, infoSecurePort=0, ipcPort=37769, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1a0d88699c3d3b8 with lease ID 0x95849f012978cc50: from storage DS-03ef22e9-098d-4974-baec-426a6b14708f node DatanodeRegistration(127.0.0.1:41607, datanodeUuid=5de375d8-4cc1-4ec3-8d8a-5a2117eca9f7, infoPort=33915, infoSecurePort=0, ipcPort=37769, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,484 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x628513354ac41493 with lease ID 0x95849f012978cc4f: Processing first storage report for DS-f85c48d7-8bbd-4128-b7d9-e76266d8dffe from datanode DatanodeRegistration(127.0.0.1:39843, datanodeUuid=d14f1d73-c28d-4c4a-b4f5-a79345f45ddf, infoPort=42177, infoSecurePort=0, ipcPort=35637, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,484 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x628513354ac41493 with lease ID 0x95849f012978cc4f: from storage DS-f85c48d7-8bbd-4128-b7d9-e76266d8dffe node DatanodeRegistration(127.0.0.1:39843, datanodeUuid=d14f1d73-c28d-4c4a-b4f5-a79345f45ddf, 
infoPort=42177, infoSecurePort=0, ipcPort=35637, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1a0d88699c3d3b8 with lease ID 0x95849f012978cc50: Processing first storage report for DS-ada0d005-c587-4a42-b79b-d490cd53db5f from datanode DatanodeRegistration(127.0.0.1:41607, datanodeUuid=5de375d8-4cc1-4ec3-8d8a-5a2117eca9f7, infoPort=33915, infoSecurePort=0, ipcPort=37769, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1a0d88699c3d3b8 with lease ID 0x95849f012978cc50: from storage DS-ada0d005-c587-4a42-b79b-d490cd53db5f node DatanodeRegistration(127.0.0.1:41607, datanodeUuid=5de375d8-4cc1-4ec3-8d8a-5a2117eca9f7, infoPort=33915, infoSecurePort=0, ipcPort=37769, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,580 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T22:56:08,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc7294b779115b9f with lease ID 0x95849f012978cc51: Processing first storage report for DS-181d4275-97db-424d-8b8b-c39260b79c8d from datanode DatanodeRegistration(127.0.0.1:36751, datanodeUuid=3bb5c23c-c8a0-4c1d-bbd0-7ffb529ed3d8, infoPort=41275, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc7294b779115b9f with lease ID 0x95849f012978cc51: from storage DS-181d4275-97db-424d-8b8b-c39260b79c8d node DatanodeRegistration(127.0.0.1:36751, datanodeUuid=3bb5c23c-c8a0-4c1d-bbd0-7ffb529ed3d8, infoPort=41275, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,586 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc7294b779115b9f with lease ID 0x95849f012978cc51: Processing first storage report for DS-4e656c9f-d754-420d-be91-3f7a9b4401f9 from datanode DatanodeRegistration(127.0.0.1:36751, datanodeUuid=3bb5c23c-c8a0-4c1d-bbd0-7ffb529ed3d8, infoPort=41275, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347) 2024-12-11T22:56:08,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc7294b779115b9f with lease ID 0x95849f012978cc51: from storage DS-4e656c9f-d754-420d-be91-3f7a9b4401f9 node DatanodeRegistration(127.0.0.1:36751, datanodeUuid=3bb5c23c-c8a0-4c1d-bbd0-7ffb529ed3d8, infoPort=41275, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=897670576;c=1733957762347), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:56:08,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b 2024-12-11T22:56:08,900 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/zookeeper_0, clientPort=50092, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T22:56:08,924 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50092 2024-12-11T22:56:08,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:08,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:09,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:56:09,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:56:09,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:56:09,389 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f with version=8 2024-12-11T22:56:09,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/hbase-staging 2024-12-11T22:56:09,504 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T22:56:09,845 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:56:09,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:09,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:09,865 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:56:09,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:09,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:56:10,041 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T22:56:10,127 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T22:56:10,141 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T22:56:10,146 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:56:10,184 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 108988 (auto-detected) 2024-12-11T22:56:10,186 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T22:56:10,228 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42043 2024-12-11T22:56:10,269 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42043 connecting to ZooKeeper ensemble=127.0.0.1:50092 2024-12-11T22:56:10,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420430x0, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:56:10,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42043-0x100cb8db5a40000 connected 2024-12-11T22:56:10,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,559 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:56:10,563 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f, hbase.cluster.distributed=false 2024-12-11T22:56:10,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:56:10,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42043 2024-12-11T22:56:10,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42043 2024-12-11T22:56:10,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=42043 2024-12-11T22:56:10,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42043 2024-12-11T22:56:10,647 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42043 2024-12-11T22:56:10,790 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:56:10,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,795 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:56:10,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,796 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:56:10,800 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:56:10,803 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:56:10,816 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37563 2024-12-11T22:56:10,821 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37563 connecting to ZooKeeper ensemble=127.0.0.1:50092 2024-12-11T22:56:10,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375630x0, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:56:10,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375630x0, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:56:10,875 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37563-0x100cb8db5a40001 connected 2024-12-11T22:56:10,878 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:56:10,889 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T22:56:10,892 DEBUG [Time-limited test 
{}] zookeeper.ZKUtil(113): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:56:10,899 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:56:10,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37563 2024-12-11T22:56:10,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37563 2024-12-11T22:56:10,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37563 2024-12-11T22:56:10,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37563 2024-12-11T22:56:10,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37563 2024-12-11T22:56:10,939 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:56:10,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,941 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:56:10,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:10,941 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:56:10,942 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:56:10,942 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:56:10,944 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33867 2024-12-11T22:56:10,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33867 connecting to ZooKeeper ensemble=127.0.0.1:50092 2024-12-11T22:56:10,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,955 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:10,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338670x0, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:56:10,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:338670x0, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:56:10,981 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:56:10,988 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33867-0x100cb8db5a40002 connected 2024-12-11T22:56:10,988 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T22:56:10,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:56:10,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:56:10,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33867 2024-12-11T22:56:11,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33867 2024-12-11T22:56:11,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33867 2024-12-11T22:56:11,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33867 2024-12-11T22:56:11,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33867 2024-12-11T22:56:11,024 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:56:11,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:11,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:11,024 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:56:11,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:56:11,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:56:11,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, 
hbase.pb.BootstrapNodeService 2024-12-11T22:56:11,025 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:56:11,028 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41635 2024-12-11T22:56:11,031 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41635 connecting to ZooKeeper ensemble=127.0.0.1:50092 2024-12-11T22:56:11,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:11,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:11,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416350x0, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:56:11,065 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41635-0x100cb8db5a40003 connected 2024-12-11T22:56:11,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:56:11,066 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:56:11,067 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T22:56:11,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:56:11,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:56:11,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41635 2024-12-11T22:56:11,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41635 2024-12-11T22:56:11,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41635 2024-12-11T22:56:11,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41635 2024-12-11T22:56:11,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41635 2024-12-11T22:56:11,093 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b762025f20c5:42043 2024-12-11T22:56:11,104 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b762025f20c5,42043,1733957769583 2024-12-11T22:56:11,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,142 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b762025f20c5,42043,1733957769583 2024-12-11T22:56:11,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:56:11,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:56:11,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,193 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T22:56:11,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:56:11,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,196 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b762025f20c5,42043,1733957769583 from backup master directory 2024-12-11T22:56:11,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-11T22:56:11,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b762025f20c5,42043,1733957769583 2024-12-11T22:56:11,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,211 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T22:56:11,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:56:11,211 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b762025f20c5,42043,1733957769583 2024-12-11T22:56:11,214 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T22:56:11,223 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T22:56:11,316 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/hbase.id] with ID: a632e75d-555c-4227-9b6b-9c0477277eba 2024-12-11T22:56:11,316 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.tmp/hbase.id 2024-12-11T22:56:11,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:56:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:56:11,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:56:11,389 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.tmp/hbase.id]:[hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/hbase.id] 2024-12-11T22:56:11,505 INFO [master/b762025f20c5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:11,511 INFO 
[master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T22:56:11,546 INFO [master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 32ms. 2024-12-11T22:56:11,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:11,614 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:11,615 WARN [IPC Server handler 1 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:11,615 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:11,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:56:11,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:56:11,701 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:56:11,704 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T22:56:11,725 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
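Editor's note: the DEBUG entry at 22:56:11,725 above records a reflective capability probe. The async WAL SASL helper looks for an optional DFSClient method and, when it is absent (the NoSuchMethodException above), falls back to the code path for Hadoop builds that already contain HDFS-12396; the exception is expected control flow, which is why it is logged at DEBUG. A minimal sketch of that probe pattern, using only the class and method names that appear in the log; the helper structure around them is illustrative, not the actual HBase implementation.

import java.lang.reflect.Method;

// Sketch of a reflective capability probe; class/method names come from the
// log entry above, everything else is a stand-in.
public final class CryptoHelperProbe {

  interface TransparentCryptoHelper {}  // stand-in for the real strategy type

  static TransparentCryptoHelper create() throws ClassNotFoundException {
    Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
    Class<?> feInfo = Class.forName("org.apache.hadoop.fs.FileEncryptionInfo");
    try {
      // Probe for the optional method that only some Hadoop versions expose.
      Method decrypt = dfsClient.getDeclaredMethod("decryptEncryptedDataEncryptionKey", feInfo);
      decrypt.setAccessible(true);
      return new TransparentCryptoHelper() {};  // would delegate to 'decrypt'
    } catch (NoSuchMethodException e) {
      // The situation logged above: method absent, so assume a Hadoop build
      // that already has HDFS-12396 and take the other code path.
      return new TransparentCryptoHelper() {};
    }
  }
}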
2024-12-11T22:56:11,731 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:56:11,768 WARN [IPC Server handler 3 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:11,768 WARN [IPC Server handler 3 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:11,768 WARN [IPC Server handler 3 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:11,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741828_1004 (size=1189) 2024-12-11T22:56:11,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741828_1004 (size=1189) 2024-12-11T22:56:11,802 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/data/master/store 2024-12-11T22:56:11,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:56:11,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:56:11,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:56:11,878 INFO [master/b762025f20c5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T22:56:11,883 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:11,885 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T22:56:11,885 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:56:11,886 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:56:11,888 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T22:56:11,889 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:56:11,889 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
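Editor's note: the INFO entry at 22:56:11,878 above reports that StoreHotnessProtector is disabled and names the property that enables it. A hedged example of flipping that switch on an HBase Configuration before servers start; the property name is taken directly from the log message, while the value 10 is an arbitrary illustration, not a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableStoreHotnessProtector {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the protector, per the log message above.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    System.out.println("parallel put limit = "
        + conf.getInt("hbase.region.store.parallel.put.limit", 0));
  }
}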
2024-12-11T22:56:11,891 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733957771885Disabling compacts and flushes for region at 1733957771885Disabling writes for close at 1733957771888 (+3 ms)Writing region close event to WAL at 1733957771889 (+1 ms)Closed at 1733957771889 2024-12-11T22:56:11,894 WARN [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/data/master/store/.initializing 2024-12-11T22:56:11,894 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583 2024-12-11T22:56:11,908 INFO [master/b762025f20c5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:56:11,945 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C42043%2C1733957769583, suffix=, logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583, archiveDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/oldWALs, maxLogs=10 2024-12-11T22:56:11,982 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950, exclude list is [], retry=0 2024-12-11T22:56:11,988 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:11,989 WARN [IPC Server handler 1 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:11,989 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:12,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK] 2024-12-11T22:56:12,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK] 2024-12-11T22:56:12,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T22:56:12,038 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:12,039 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:12,039 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:12,039 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:12,039 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:12,039 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:12,040 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 
0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:12,040 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:12,089 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 2024-12-11T22:56:12,092 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41275:41275),(127.0.0.1/127.0.0.1:33915:33915)] 2024-12-11T22:56:12,093 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:56:12,093 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:12,099 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,100 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T22:56:12,230 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:12,243 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:12,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T22:56:12,257 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:12,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:12,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T22:56:12,264 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:12,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:12,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T22:56:12,272 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:12,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:12,275 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,288 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,290 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,300 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,301 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,305 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
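Editor's note: the FlushLargeStoresPolicy entry just above falls back to the memstore flush size divided by the number of families because no per-family lower bound is configured. With the flushSize=134217728 injected at 22:56:11,704 and the four families of master:store (info, proc, rs, state), that gives the 32.0 M shown, and matches the flushSizeLowerBound=33554432 logged shortly afterwards. A small check of that arithmetic:

public class FlushLowerBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L; // flushSize logged at 22:56:11,704
    int families = 4;                      // info, proc, rs, state
    long lowerBound = memstoreFlushSize / families;
    System.out.println(lowerBound);        // 33554432 bytes = 32 MB
  }
}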
2024-12-11T22:56:12,321 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:56:12,327 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:12,328 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70908746, jitterRate=0.05662265419960022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:56:12,335 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733957772122Initializing all the Stores at 1733957772132 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957772133 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957772143 (+10 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957772143Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957772144 (+1 ms)Cleaning up temporary data from old regions at 1733957772301 (+157 ms)Region opened successfully at 1733957772335 (+34 ms) 2024-12-11T22:56:12,337 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T22:56:12,397 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4606b4b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:56:12,435 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
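Editor's note: the HRegion "Opened 1595e783b53d99cd5eef43b6debb2682" line above reports desiredMaxFileSize=70908746 with jitterRate=0.05662265419960022. Those numbers are consistent with a 64 MB (67108864-byte) base split size, presumably this test's hbase.hregion.max.filesize, with the jitter applied on top; the base value itself does not appear in the log, so treat it as an inference. A quick check under that assumption:

public class SplitSizeJitter {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024;            // assumed 64 MB base; not shown in the log
    double jitterRate = 0.05662265419960022;  // value from the log line above
    long desired = base + Math.round(base * jitterRate);
    System.out.println(desired);              // 70908746, matching desiredMaxFileSize
  }
}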
2024-12-11T22:56:12,452 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T22:56:12,452 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T22:56:12,457 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T22:56:12,476 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 18 msec 2024-12-11T22:56:12,484 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-11T22:56:12,484 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T22:56:12,535 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T22:56:12,550 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T22:56:12,566 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T22:56:12,570 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T22:56:12,577 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T22:56:12,595 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T22:56:12,598 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T22:56:12,614 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T22:56:12,625 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T22:56:12,628 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T22:56:12,637 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T22:56:12,682 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T22:56:12,693 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T22:56:12,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:56:12,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:56:12,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:56:12,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:56:12,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,719 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b762025f20c5,42043,1733957769583, sessionid=0x100cb8db5a40000, setting cluster-up flag (Was=false) 2024-12-11T22:56:12,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
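Editor's note: the watcher traffic above ("Set watcher on znode that does not yet exist, /hbase/running" earlier, followed here by NodeCreated events for /hbase/running once the master marks the cluster up) is the usual ZooKeeper watch-then-react pattern. A minimal sketch using the plain ZooKeeper client API rather than HBase's ZKWatcher/ZKUtil wrappers, with the quorum string taken from the log; exists() returns null for an absent znode but still leaves the watch armed, which is exactly what the "does not yet exist" entries refer to.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:50092";   // quorum string as in the log
    String path = "/hbase/running";
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && path.equals(event.getPath())) {
        System.out.println("cluster marked as running: " + event.getPath());
      }
    };
    // Sets the watch even when the znode is not there yet.
    if (zk.exists(path, watcher) != null) {
      System.out.println(path + " already exists");
    }
    Thread.sleep(60_000);                // keep the session alive for the demo
    zk.close();
  }
}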
2024-12-11T22:56:12,808 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T22:56:12,829 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,42043,1733957769583 2024-12-11T22:56:12,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:12,911 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T22:56:12,924 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,42043,1733957769583 2024-12-11T22:56:12,936 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T22:56:12,996 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-11T22:56:12,997 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(746): ClusterId : a632e75d-555c-4227-9b6b-9c0477277eba 2024-12-11T22:56:13,002 INFO [master/b762025f20c5:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:13,002 INFO [master/b762025f20c5:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-11T22:56:13,006 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:56:13,007 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(746): ClusterId : a632e75d-555c-4227-9b6b-9c0477277eba 2024-12-11T22:56:13,007 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:56:13,011 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(746): ClusterId : a632e75d-555c-4227-9b6b-9c0477277eba 2024-12-11T22:56:13,011 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:56:13,038 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:56:13,038 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:56:13,039 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:56:13,040 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:56:13,050 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:56:13,051 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:56:13,069 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:56:13,069 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:56:13,070 DEBUG [RS:2;b762025f20c5:41635 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f20e658, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:56:13,071 DEBUG [RS:1;b762025f20c5:33867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b5e67f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:56:13,082 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:56:13,083 DEBUG [RS:0;b762025f20c5:37563 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@488d9aba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:56:13,097 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b762025f20c5:33867 2024-12-11T22:56:13,102 INFO [RS:1;b762025f20c5:33867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:56:13,102 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b762025f20c5:37563 
2024-12-11T22:56:13,102 INFO [RS:1;b762025f20c5:33867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:56:13,102 INFO [RS:0;b762025f20c5:37563 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:56:13,102 INFO [RS:0;b762025f20c5:37563 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:56:13,101 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T22:56:13,103 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T22:56:13,103 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T22:56:13,103 INFO [RS:0;b762025f20c5:37563 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:13,103 INFO [RS:1;b762025f20c5:33867 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:13,103 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:56:13,103 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:56:13,108 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=33867, startcode=1733957770938 2024-12-11T22:56:13,108 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=37563, startcode=1733957770731 2024-12-11T22:56:13,121 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T22:56:13,122 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b762025f20c5:41635 2024-12-11T22:56:13,122 INFO [RS:2;b762025f20c5:41635 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:56:13,122 INFO [RS:2;b762025f20c5:41635 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:56:13,123 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T22:56:13,123 INFO [RS:2;b762025f20c5:41635 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:13,124 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(832): About to register with Master. 
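Editor's note: the coprocessor lines above show AccessController being loaded as a system coprocessor on the master and on each region server (plus the test-only SecureTestUtil$MasterSyncObserver on the master). A hedged sketch of how such system coprocessors are typically wired up through configuration before the cluster starts; the property keys are standard HBase ones, and the exact set this particular test configures is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WireAccessController {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Load AccessController as a system coprocessor on master, region servers
    // and regions, mirroring the "System coprocessor ... loaded" entries above.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    System.out.println(conf.get("hbase.coprocessor.master.classes"));
  }
}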
2024-12-11T22:56:13,124 DEBUG [RS:1;b762025f20c5:33867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:56:13,124 DEBUG [RS:0;b762025f20c5:37563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:56:13,127 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=41635, startcode=1733957771023 2024-12-11T22:56:13,128 DEBUG [RS:2;b762025f20c5:41635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:56:13,132 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T22:56:13,140 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b762025f20c5,42043,1733957769583 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T22:56:13,153 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:56:13,153 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:56:13,153 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:56:13,153 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:56:13,153 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b762025f20c5:0, corePoolSize=10, maxPoolSize=10 2024-12-11T22:56:13,154 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,154 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:56:13,154 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,176 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:56:13,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T22:56:13,193 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46485, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:56:13,198 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44231, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:56:13,199 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:56:13,199 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733957803199 2024-12-11T22:56:13,201 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T22:56:13,202 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:?] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:?] 
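The StochasticLoadBalancer line above reports the balancer parameters loaded on the master (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A sketch of how those knobs are typically tuned; the property names are assumptions based on the balancer's documented configuration keys, not values taken from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuning {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the values logged by StochasticLoadBalancer above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        return conf;
    }
}
```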
2024-12-11T22:56:13,203 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T22:56:13,204 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:13,205 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T22:56:13,214 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T22:56:13,215 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T22:56:13,215 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T22:56:13,215 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T22:56:13,218 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:?] 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:?] 2024-12-11T22:56:13,220 WARN [IPC Server handler 3 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:13,220 WARN [IPC Server handler 3 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:13,220 WARN [IPC Server handler 3 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:13,222 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:?] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:?] 2024-12-11T22:56:13,227 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
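The BlockPlacementPolicyDefault warnings above ("still in need of 1 to reach 3") are typical of a test cluster that runs fewer DataNodes than the default HDFS replication factor of 3: the HOT storage policy cannot be satisfied and HDFS logs the shortfall. A hedged sketch of the usual test-side remedy, lowering dfs.replication to the number of DataNodes actually started (the value 2 here is an illustrative assumption, not taken from this run's configuration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TestHdfsSettings {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Match the replication target to the number of DataNodes the mini cluster starts.
        conf.setInt("dfs.replication", 2);
        return conf;
    }
}
```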
2024-12-11T22:56:13,231 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T22:56:13,233 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T22:56:13,233 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T22:56:13,239 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T22:56:13,240 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T22:56:13,247 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733957773242,5,FailOnTimeoutGroup] 2024-12-11T22:56:13,248 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733957773248,5,FailOnTimeoutGroup] 2024-12-11T22:56:13,249 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,249 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T22:56:13,251 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,251 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,258 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T22:56:13,258 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T22:56:13,258 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T22:56:13,258 WARN [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T22:56:13,258 WARN [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T22:56:13,258 WARN [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
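The HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of enabling that recovery chore; the threshold of 256 is an illustrative assumption:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecovery {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the region-reopen behaviour described in the log message above.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        return conf;
    }
}
```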
2024-12-11T22:56:13,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:56:13,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:56:13,269 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T22:56:13,270 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:13,279 WARN [IPC Server handler 4 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:13,279 WARN [IPC Server handler 4 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:13,280 WARN [IPC Server handler 4 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:13,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:56:13,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:56:13,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:13,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:56:13,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:56:13,299 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:13,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:13,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:56:13,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:56:13,305 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:13,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:13,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:56:13,310 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:56:13,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:13,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:13,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:56:13,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:56:13,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:13,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
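The StoreOpener entries above materialize the hbase:meta column families defined earlier (info, ns, rep_barrier, table) with ROW_INDEX_V1 data block encoding, ROWCOL bloom filters, in-memory caching and an 8 KB block size (64 KB for rep_barrier). A sketch of how an equivalent descriptor is assembled with the public client API; the table name and the single family shown are illustrative, not a reconstruction of the meta descriptor itself:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static TableDescriptor build() {
        // Column family mirroring the settings logged for the 'info' family above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build();
        return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example")) // hypothetical table name
                .setColumnFamily(info)
                .build();
    }
}
```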
2024-12-11T22:56:13,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:56:13,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740 2024-12-11T22:56:13,322 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740 2024-12-11T22:56:13,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:56:13,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:56:13,328 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:56:13,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:56:13,341 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:13,342 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59239392, jitterRate=-0.1172642707824707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:56:13,347 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733957773293Initializing all the Stores at 1733957773295 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957773295Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957773296 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957773296Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957773296Cleaning up temporary data from old regions at 1733957773326 (+30 ms)Region opened successfully at 
1733957773347 (+21 ms) 2024-12-11T22:56:13,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T22:56:13,348 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T22:56:13,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T22:56:13,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T22:56:13,349 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T22:56:13,351 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T22:56:13,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733957773348Disabling compacts and flushes for region at 1733957773348Disabling writes for close at 1733957773349 (+1 ms)Writing region close event to WAL at 1733957773350 (+1 ms)Closed at 1733957773350 2024-12-11T22:56:13,356 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:56:13,356 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T22:56:13,360 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=41635, startcode=1733957771023 2024-12-11T22:56:13,361 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=33867, startcode=1733957770938 2024-12-11T22:56:13,361 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,42043,1733957769583 with port=37563, startcode=1733957770731 2024-12-11T22:56:13,362 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T22:56:13,366 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,376 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,377 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T22:56:13,377 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:13,378 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45807 2024-12-11T22:56:13,378 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1440): Config from 
master: hbase.master.info.port=-1 2024-12-11T22:56:13,380 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,384 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T22:56:13,386 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:13,386 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45807 2024-12-11T22:56:13,387 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:56:13,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,393 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:13,393 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45807 2024-12-11T22:56:13,393 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:56:13,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:56:13,473 DEBUG [RS:2;b762025f20c5:41635 {}] zookeeper.ZKUtil(111): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,473 WARN [RS:2;b762025f20c5:41635 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T22:56:13,474 DEBUG [RS:1;b762025f20c5:33867 {}] zookeeper.ZKUtil(111): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,474 WARN [RS:1;b762025f20c5:33867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
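The "Config from master" and ZKWatcher entries above show the handful of settings each region server shares with the cluster: the HBase root directory on HDFS, the default filesystem, the master info port (-1, i.e. disabled), and the ZooKeeper ensemble (quorum=127.0.0.1:50092, baseZNode=/hbase). A sketch of the corresponding client-side configuration using the values visible in this log; splitting the quorum into host and client port is one common way to express it:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClusterSettings {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir",
                 "hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f");
        conf.set("fs.defaultFS", "hdfs://localhost:45807");
        conf.setInt("hbase.master.info.port", -1); // -1 disables the master info UI, as in this run
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 50092);
        conf.set("zookeeper.znode.parent", "/hbase");
        return conf;
    }
}
```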
2024-12-11T22:56:13,474 INFO [RS:1;b762025f20c5:33867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:56:13,475 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,480 INFO [RS:2;b762025f20c5:41635 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:56:13,480 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,481 DEBUG [RS:0;b762025f20c5:37563 {}] zookeeper.ZKUtil(111): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,482 WARN [RS:0;b762025f20c5:37563 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T22:56:13,482 INFO [RS:0;b762025f20c5:37563 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:56:13,482 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,33867,1733957770938] 2024-12-11T22:56:13,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,41635,1733957771023] 2024-12-11T22:56:13,486 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,37563,1733957770731] 2024-12-11T22:56:13,527 INFO [RS:2;b762025f20c5:41635 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:56:13,529 INFO [RS:0;b762025f20c5:37563 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:56:13,531 INFO [RS:1;b762025f20c5:33867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:56:13,535 WARN [b762025f20c5:42043 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
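Each region server above instantiates an AsyncFSWALProvider and points it at a per-server directory under .../WALs. A minimal sketch of how the WAL provider is normally selected through configuration; the key and value string are assumptions based on HBase's standard WAL provider settings rather than something this log confirms:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSettings {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects the asynchronous provider logged above; "filesystem" selects the classic FSHLog provider.
        conf.set("hbase.wal.provider", "asyncfs");
        return conf;
    }
}
```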
2024-12-11T22:56:13,558 INFO [RS:0;b762025f20c5:37563 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:56:13,558 INFO [RS:2;b762025f20c5:41635 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:56:13,558 INFO [RS:1;b762025f20c5:33867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:56:13,568 INFO [RS:0;b762025f20c5:37563 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:56:13,568 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,578 INFO [RS:1;b762025f20c5:33867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:56:13,578 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,579 INFO [RS:2;b762025f20c5:41635 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:56:13,580 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,588 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:56:13,591 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:56:13,597 INFO [RS:1;b762025f20c5:33867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:56:13,597 INFO [RS:0;b762025f20c5:37563 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:56:13,599 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:56:13,599 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
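The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report a global memstore limit of 880 M (low-water mark 836 M) and compaction throughput bounds of 100/50 MB per second with a 60 s tuning period. A hedged sketch of the knobs that usually control these numbers; the keys are assumptions based on stock HBase configuration, and the memstore values are expressed as fractions of the region server heap rather than absolute sizes:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionThroughput {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the heap usable by all memstores, and the low-water fraction of that limit.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds in bytes/second (100 MB/s and 50 MB/s, as logged above).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
    }
}
```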
2024-12-11T22:56:13,600 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,600 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,600 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,601 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,601 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,601 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:56:13,601 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,601 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,602 DEBUG [RS:0;b762025f20c5:37563 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,603 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T22:56:13,604 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,604 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,604 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,604 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,605 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,606 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,606 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,606 DEBUG [RS:1;b762025f20c5:33867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,607 INFO [RS:2;b762025f20c5:41635 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:56:13,607 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T22:56:13,608 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,608 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,608 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,608 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,609 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,610 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:56:13,610 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,610 DEBUG [RS:2;b762025f20c5:41635 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:56:13,659 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,659 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,660 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,660 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
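The three region servers each start the same set of internal executor pools (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS and so on) with small fixed core/max sizes. A sketch of how a couple of those pool sizes are commonly widened; the property names here are assumptions about the region server's executor keys and are not confirmed by this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExecutorPoolSettings {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: widen the open/close region pools beyond the single thread seen above.
        conf.setInt("hbase.regionserver.executor.openregion.threads", 3);
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 3);
        return conf;
    }
}
```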
2024-12-11T22:56:13,660 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,660 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,41635,1733957771023-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:56:13,672 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,672 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,672 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,673 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,673 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,673 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,37563,1733957770731-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:56:13,683 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,683 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,684 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,684 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,684 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,684 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,33867,1733957770938-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:56:13,690 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:56:13,693 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,41635,1733957771023-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,693 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,694 INFO [RS:2;b762025f20c5:41635 {}] regionserver.Replication(171): b762025f20c5,41635,1733957771023 started 2024-12-11T22:56:13,720 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:56:13,720 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,33867,1733957770938-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
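The ChoreService lines above register periodic chores on each server (CompactionChecker and MemstoreFlusherChore every 1 s, ExecutorStatusChore every 60 s, nonceCleaner every 6 min, and so on). For context, a minimal sketch of the ScheduledChore/ChoreService pattern these entries come from, assuming the public ScheduledChore constructor; the chore name and period are hypothetical and this is not a reconstruction of any chore in the log:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // Simple stopper the chore checks between runs.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        // A hypothetical chore that runs every 1000 ms until the stopper is triggered.
        ScheduledChore heartbeat = new ScheduledChore("exampleChore", stopper, 1000) {
            @Override protected void chore() {
                System.out.println("chore tick");
            }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(heartbeat);
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
    }
}
```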
2024-12-11T22:56:13,721 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,722 INFO [RS:1;b762025f20c5:33867 {}] regionserver.Replication(171): b762025f20c5,33867,1733957770938 started 2024-12-11T22:56:13,741 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:56:13,741 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,37563,1733957770731-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,742 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,742 INFO [RS:0;b762025f20c5:37563 {}] regionserver.Replication(171): b762025f20c5,37563,1733957770731 started 2024-12-11T22:56:13,756 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,756 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:13,757 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,33867,1733957770938, RpcServer on b762025f20c5/172.17.0.2:33867, sessionid=0x100cb8db5a40002 2024-12-11T22:56:13,757 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,41635,1733957771023, RpcServer on b762025f20c5/172.17.0.2:41635, sessionid=0x100cb8db5a40003 2024-12-11T22:56:13,757 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:56:13,757 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:56:13,758 DEBUG [RS:1;b762025f20c5:33867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,758 DEBUG [RS:2;b762025f20c5:41635 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,758 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,33867,1733957770938' 2024-12-11T22:56:13,758 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,41635,1733957771023' 2024-12-11T22:56:13,758 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:56:13,758 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:56:13,759 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:56:13,759 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:56:13,760 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 
2024-12-11T22:56:13,760 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:56:13,760 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:56:13,760 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:56:13,760 DEBUG [RS:2;b762025f20c5:41635 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,41635,1733957771023 2024-12-11T22:56:13,760 DEBUG [RS:1;b762025f20c5:33867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,33867,1733957770938 2024-12-11T22:56:13,760 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,33867,1733957770938' 2024-12-11T22:56:13,760 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,41635,1733957771023' 2024-12-11T22:56:13,760 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:56:13,760 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:56:13,761 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:56:13,761 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:56:13,762 DEBUG [RS:1;b762025f20c5:33867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:56:13,762 DEBUG [RS:2;b762025f20c5:41635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:56:13,762 INFO [RS:1;b762025f20c5:33867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:56:13,762 INFO [RS:2;b762025f20c5:41635 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:56:13,762 INFO [RS:1;b762025f20c5:33867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:56:13,762 INFO [RS:2;b762025f20c5:41635 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:56:13,766 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T22:56:13,767 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,37563,1733957770731, RpcServer on b762025f20c5/172.17.0.2:37563, sessionid=0x100cb8db5a40001 2024-12-11T22:56:13,768 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:56:13,768 DEBUG [RS:0;b762025f20c5:37563 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,768 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,37563,1733957770731' 2024-12-11T22:56:13,768 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:56:13,769 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:56:13,775 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:56:13,775 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:56:13,776 DEBUG [RS:0;b762025f20c5:37563 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,37563,1733957770731 2024-12-11T22:56:13,776 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,37563,1733957770731' 2024-12-11T22:56:13,776 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:56:13,778 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:56:13,779 DEBUG [RS:0;b762025f20c5:37563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:56:13,779 INFO [RS:0;b762025f20c5:37563 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:56:13,779 INFO [RS:0;b762025f20c5:37563 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
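A minimal Java sketch (not taken from this run) of how a chore like the ones enabled above is defined and handed to a ChoreService. The classes are the public org.apache.hadoop.hbase API; the constructor signatures shown are from released versions and could differ slightly in this 4.0.0-alpha-1-SNAPSHOT build.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable; a real region server passes itself as the stopper.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Period mirrors the CompactionChecker entries above: fires every 1000 ms.
        ScheduledChore checker =
            new ScheduledChore("CompactionChecker", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
              @Override protected void chore() {
                // A real CompactionChecker walks online regions and queues compactions here.
                System.out.println("chore tick");
              }
            };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(checker);   // reported above as "... is enabled."
        Thread.sleep(3000);
        service.shutdown();
      }
    }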
2024-12-11T22:56:13,873 INFO [RS:2;b762025f20c5:41635 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:56:13,875 INFO [RS:1;b762025f20c5:33867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:56:13,880 INFO [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C41635%2C1733957771023, suffix=, logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023, archiveDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs, maxLogs=32 2024-12-11T22:56:13,880 INFO [RS:0;b762025f20c5:37563 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:56:13,885 INFO [RS:1;b762025f20c5:33867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C33867%2C1733957770938, suffix=, logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,33867,1733957770938, archiveDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs, maxLogs=32 2024-12-11T22:56:13,896 INFO [RS:0;b762025f20c5:37563 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C37563%2C1733957770731, suffix=, logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,37563,1733957770731, archiveDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs, maxLogs=32 2024-12-11T22:56:13,905 DEBUG [RS:2;b762025f20c5:41635 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023/b762025f20c5%2C41635%2C1733957771023.1733957773882, exclude list is [], retry=0 2024-12-11T22:56:13,905 DEBUG [RS:1;b762025f20c5:33867 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,33867,1733957770938/b762025f20c5%2C33867%2C1733957770938.1733957773887, exclude list is [], retry=0 2024-12-11T22:56:13,909 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:13,909 WARN [IPC Server handler 2 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:13,910 WARN [IPC Server handler 2 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be 
selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:13,910 WARN [IPC Server handler 1 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:13,910 WARN [IPC Server handler 2 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:13,910 WARN [IPC Server handler 1 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK] 2024-12-11T22:56:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK] 2024-12-11T22:56:13,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK] 2024-12-11T22:56:13,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK] 2024-12-11T22:56:13,936 DEBUG [RS:0;b762025f20c5:37563 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,37563,1733957770731/b762025f20c5%2C37563%2C1733957770731.1733957773898, exclude list is [], retry=0 2024-12-11T22:56:13,944 WARN [IPC Server handler 0 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:56:13,945 WARN [IPC Server handler 0 on default port 45807 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:56:13,945 WARN [IPC Server handler 0 on default port 45807 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:56:13,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK] 2024-12-11T22:56:13,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK] 2024-12-11T22:56:13,956 INFO [RS:1;b762025f20c5:33867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,33867,1733957770938/b762025f20c5%2C33867%2C1733957770938.1733957773887 2024-12-11T22:56:14,040 DEBUG [RS:1;b762025f20c5:33867 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33915:33915),(127.0.0.1/127.0.0.1:41275:41275)] 2024-12-11T22:56:14,043 INFO [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023/b762025f20c5%2C41635%2C1733957771023.1733957773882 2024-12-11T22:56:14,047 INFO [RS:0;b762025f20c5:37563 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,37563,1733957770731/b762025f20c5%2C37563%2C1733957770731.1733957773898 2024-12-11T22:56:14,052 DEBUG [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41275:41275),(127.0.0.1/127.0.0.1:33915:33915)] 2024-12-11T22:56:14,065 DEBUG [RS:0;b762025f20c5:37563 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33915:33915),(127.0.0.1/127.0.0.1:41275:41275)] 2024-12-11T22:56:14,290 DEBUG [b762025f20c5:42043 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T22:56:14,302 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:56:14,317 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:56:14,317 DEBUG [b762025f20c5:42043 
{}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:56:14,317 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:56:14,318 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:56:14,318 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:56:14,318 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:56:14,318 INFO [b762025f20c5:42043 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:56:14,318 INFO [b762025f20c5:42043 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:56:14,318 INFO [b762025f20c5:42043 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:56:14,318 DEBUG [b762025f20c5:42043 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:56:14,331 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:14,350 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,41635,1733957771023, state=OPENING 2024-12-11T22:56:14,394 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T22:56:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:14,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:14,423 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,423 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,423 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,426 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T22:56:14,426 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:56:14,645 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:14,648 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59187, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:14,665 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T22:56:14,665 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:56:14,666 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T22:56:14,673 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C41635%2C1733957771023.meta, suffix=.meta, logDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023, archiveDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs, maxLogs=32 2024-12-11T22:56:14,698 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023/b762025f20c5%2C41635%2C1733957771023.meta.1733957774676.meta, exclude list is [], retry=0 2024-12-11T22:56:14,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39843,DS-0c049be1-bad2-41a4-b16f-833e5e25857a,DISK] 2024-12-11T22:56:14,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK] 2024-12-11T22:56:14,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK] 2024-12-11T22:56:14,748 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,41635,1733957771023/b762025f20c5%2C41635%2C1733957771023.meta.1733957774676.meta 2024-12-11T22:56:14,754 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42177:42177),(127.0.0.1/127.0.0.1:41275:41275),(127.0.0.1/127.0.0.1:33915:33915)] 2024-12-11T22:56:14,754 DEBUG 
[RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:56:14,756 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-11T22:56:14,758 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:14,759 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T22:56:14,762 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T22:56:14,767 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T22:56:14,780 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T22:56:14,781 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:14,782 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T22:56:14,782 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T22:56:14,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:56:14,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:56:14,809 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:14,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:14,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:56:14,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:56:14,825 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:14,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:14,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:56:14,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:56:14,833 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:14,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:14,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:56:14,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:56:14,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:14,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:56:14,840 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:56:14,843 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740 2024-12-11T22:56:14,850 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740 2024-12-11T22:56:14,854 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:56:14,855 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:56:14,858 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
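The CompactionConfiguration dump above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, major period 604800000, major jitter 0.500000) reflects ordinary hbase-site.xml settings. A small sketch, assuming the long-standing key names (they may have moved in this snapshot build), of reading and overriding those values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults given as the second argument match the values logged above.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        float ratio  = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
        long majorMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
        float jitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.printf("min=%d max=%d ratio=%.1f major=%dms jitter=%.1f%n",
            minFiles, maxFiles, ratio, majorMs, jitter);
        // Example override: run major compactions daily instead of weekly.
        conf.setLong("hbase.hregion.majorcompaction", 24L * 60 * 60 * 1000);
      }
    }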
2024-12-11T22:56:14,865 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:56:14,869 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71637133, jitterRate=0.06747646629810333}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:56:14,869 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T22:56:14,873 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733957774783Writing region info on filesystem at 1733957774784 (+1 ms)Initializing all the Stores at 1733957774787 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957774787Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957774791 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957774791Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957774791Cleaning up temporary data from old regions at 1733957774855 (+64 ms)Running coprocessor post-open hooks at 1733957774869 (+14 ms)Region opened successfully at 1733957774873 (+4 ms) 2024-12-11T22:56:14,884 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733957774628 2024-12-11T22:56:14,902 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T22:56:14,903 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T22:56:14,914 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:14,922 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,41635,1733957771023, state=OPEN 2024-12-11T22:56:14,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:56:14,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:56:14,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:56:14,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:56:14,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,943 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:56:14,945 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b762025f20c5,41635,1733957771023 2024-12-11T22:56:14,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T22:56:14,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,41635,1733957771023 in 517 msec 2024-12-11T22:56:14,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T22:56:14,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.5910 sec 2024-12-11T22:56:14,979 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:56:14,979 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T22:56:15,010 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:15,012 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:15,036 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:15,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39415, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:15,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 2.0670 sec 2024-12-11T22:56:15,093 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733957775092, completionTime=-1 2024-12-11T22:56:15,096 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T22:56:15,096 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T22:56:15,137 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T22:56:15,137 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733957835137 2024-12-11T22:56:15,137 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733957895137 2024-12-11T22:56:15,137 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 40 msec 2024-12-11T22:56:15,140 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:56:15,164 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:15,165 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:15,165 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:15,167 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b762025f20c5:42043, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:15,168 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 
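The InitMetaProcedure above fetches the hbase:meta location from the connection registry before creating the default and hbase namespaces; a client connection performs the same lookup. A rough sketch, with the ZooKeeper host and port taken from the quorum string in this log purely as placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLookupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");            // placeholder host
        conf.set("hbase.zookeeper.property.clientPort", "50092");   // port from quorum=127.0.0.1:50092
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Triggers the same "Start fetching meta region location from registry" path seen above.
          System.out.println(locator.getAllRegionLocations());
        }
      }
    }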
2024-12-11T22:56:15,183 DEBUG [master/b762025f20c5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T22:56:15,183 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:15,244 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.033sec 2024-12-11T22:56:15,247 INFO [master/b762025f20c5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T22:56:15,249 INFO [master/b762025f20c5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T22:56:15,251 INFO [master/b762025f20c5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T22:56:15,252 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T22:56:15,252 INFO [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T22:56:15,254 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:56:15,255 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T22:56:15,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@197dfdb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:15,330 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T22:56:15,331 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T22:56:15,394 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:15,398 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T22:56:15,399 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is b762025f20c5,42043,1733957769583 2024-12-11T22:56:15,405 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f548f44 2024-12-11T22:56:15,409 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:15,418 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T22:56:15,421 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54839, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=MasterService 2024-12-11T22:56:15,430 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T22:56:15,460 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:15,465 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:15,466 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:15,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-11T22:56:15,466 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f6a4537, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:15,466 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:15,469 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:56:15,470 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:15,471 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-11T22:56:15,474 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:15,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:56:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:56:15,482 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:15,484 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:15,487 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46ba74ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:15,487 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:15,497 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:15,498 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:15,502 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b762025f20c5,42043,1733957769583 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/test.cache.data in system properties and HBase conf 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir in system properties and HBase conf 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T22:56:15,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:56:15,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/nfs.dump.dir in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T22:56:15,509 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T22:56:15,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741837_1013 (size=349) 2024-12-11T22:56:15,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741837_1013 (size=349) 2024-12-11T22:56:15,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741837_1013 (size=349) 2024-12-11T22:56:15,538 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 83c7242f4212a8a5171da84f96da9bc6, NAME => 'hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:15,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741838_1014 (size=36) 2024-12-11T22:56:15,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741838_1014 (size=36) 2024-12-11T22:56:15,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741838_1014 (size=36) 2024-12-11T22:56:15,591 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:15,591 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 83c7242f4212a8a5171da84f96da9bc6, disabling compactions & flushes 2024-12-11T22:56:15,592 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T22:56:15,592 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T22:56:15,592 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. after waiting 0 ms 2024-12-11T22:56:15,592 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T22:56:15,592 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 
2024-12-11T22:56:15,592 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 83c7242f4212a8a5171da84f96da9bc6: Waiting for close lock at 1733957775591Disabling compacts and flushes for region at 1733957775591Disabling writes for close at 1733957775592 (+1 ms)Writing region close event to WAL at 1733957775592Closed at 1733957775592 2024-12-11T22:56:15,598 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:56:15,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:56:15,608 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733957775600"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957775600"}]},"ts":"1733957775600"} 2024-12-11T22:56:15,617 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T22:56:15,623 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:56:15,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957775623"}]},"ts":"1733957775623"} 2024-12-11T22:56:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741839_1015 (size=592039) 2024-12-11T22:56:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741839_1015 (size=592039) 2024-12-11T22:56:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741839_1015 (size=592039) 2024-12-11T22:56:15,635 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-11T22:56:15,636 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:56:15,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:56:15,641 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:56:15,641 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:56:15,641 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:56:15,641 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
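
Annotation (not part of the log): the hbase:acl descriptor spelled out above (a single column family 'l' with VERSIONS => '1', IN_MEMORY => 'true', BLOOMFILTER => 'NONE', BLOCKSIZE => '8192 B (8KB)') corresponds roughly to the following builder calls. This is a minimal illustrative sketch using the standard HBase TableDescriptorBuilder/ColumnFamilyDescriptorBuilder client API, not code taken from this test run; the acl table is created by the master itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class AclDescriptorSketch {
  public static void main(String[] args) {
    // Column family 'l' as logged: one version, in-memory, no bloom filter, 8 KB blocks.
    ColumnFamilyDescriptor l = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("l"))
        .setMaxVersions(1)
        .setInMemory(true)
        .setBloomFilterType(BloomType.NONE)
        .setBlocksize(8 * 1024)
        .build();
    // Table descriptor equivalent to 'hbase:acl'; shown only to make the logged
    // TABLE_ATTRIBUTES readable, since the system table is not created by user code.
    TableDescriptor acl = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("hbase", "acl"))
        .setColumnFamily(l)
        .build();
    System.out.println(acl);
  }
}
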
2024-12-11T22:56:15,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=83c7242f4212a8a5171da84f96da9bc6, ASSIGN}] 2024-12-11T22:56:15,660 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=83c7242f4212a8a5171da84f96da9bc6, ASSIGN 2024-12-11T22:56:15,666 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=83c7242f4212a8a5171da84f96da9bc6, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:56:15,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T22:56:15,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T22:56:15,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T22:56:15,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:56:15,822 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T22:56:15,823 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=83c7242f4212a8a5171da84f96da9bc6, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:56:15,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=83c7242f4212a8a5171da84f96da9bc6, ASSIGN because future has completed 2024-12-11T22:56:15,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83c7242f4212a8a5171da84f96da9bc6, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:56:16,040 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:16,073 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:16,089 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 
2024-12-11T22:56:16,091 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 83c7242f4212a8a5171da84f96da9bc6, NAME => 'hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:56:16,092 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. service=AccessControlService 2024-12-11T22:56:16,092 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:16,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:16,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,093 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,097 INFO [StoreOpener-83c7242f4212a8a5171da84f96da9bc6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,100 INFO [StoreOpener-83c7242f4212a8a5171da84f96da9bc6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83c7242f4212a8a5171da84f96da9bc6 columnFamilyName l 2024-12-11T22:56:16,100 DEBUG [StoreOpener-83c7242f4212a8a5171da84f96da9bc6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:16,103 INFO [StoreOpener-83c7242f4212a8a5171da84f96da9bc6-1 {}] regionserver.HStore(327): Store=83c7242f4212a8a5171da84f96da9bc6/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:16,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,107 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,108 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,112 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,112 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:56:16,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,139 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:16,148 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 83c7242f4212a8a5171da84f96da9bc6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65733027, jitterRate=-0.020501568913459778}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:16,148 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T22:56:16,150 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 83c7242f4212a8a5171da84f96da9bc6: Running coprocessor pre-open hook at 1733957776093Writing region info on filesystem at 1733957776093Initializing all the Stores at 1733957776096 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733957776097 (+1 ms)Cleaning up temporary data 
from old regions at 1733957776112 (+15 ms)Running coprocessor post-open hooks at 1733957776148 (+36 ms)Region opened successfully at 1733957776150 (+2 ms) 2024-12-11T22:56:16,156 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., pid=6, masterSystemTime=1733957776039 2024-12-11T22:56:16,164 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T22:56:16,164 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T22:56:16,171 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=83c7242f4212a8a5171da84f96da9bc6, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:56:16,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83c7242f4212a8a5171da84f96da9bc6, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:56:16,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T22:56:16,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 83c7242f4212a8a5171da84f96da9bc6, server=b762025f20c5,37563,1733957770731 in 353 msec 2024-12-11T22:56:16,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T22:56:16,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=83c7242f4212a8a5171da84f96da9bc6, ASSIGN in 575 msec 2024-12-11T22:56:16,242 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:56:16,244 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957776244"}]},"ts":"1733957776244"} 2024-12-11T22:56:16,257 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-11T22:56:16,262 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:56:16,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 804 msec 2024-12-11T22:56:16,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:56:16,640 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table 
Name: hbase:acl completed 2024-12-11T22:56:16,663 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T22:56:16,664 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T22:56:16,665 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,42043,1733957769583-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:56:17,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741828_1004 (size=1189) 2024-12-11T22:56:17,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:56:17,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:56:17,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:56:18,126 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:18,241 WARN [Thread-376 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:18,496 INFO [Thread-376 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:18,497 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T22:56:18,498 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:18,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:18,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:18,535 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:56:18,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:18,563 INFO [Thread-376 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:18,563 INFO [Thread-376 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:18,564 INFO [Thread-376 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:56:18,572 INFO [Thread-376 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@176bba66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:18,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c8c1793{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:18,573 INFO [Thread-376 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5512af0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:18,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45c2a887{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:18,898 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T22:56:18,898 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-11T22:56:18,899 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T22:56:18,901 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T22:56:18,985 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:19,520 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-11T22:56:19,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T22:56:19,571 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:20,083 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:20,119 INFO [Thread-376 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@7cbd050f{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-37305-hadoop-yarn-common-3_4_1_jar-_-any-7812306991357630960/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T22:56:20,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@429c4453{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-33061-hadoop-yarn-common-3_4_1_jar-_-any-9492624577002169562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T22:56:20,120 INFO [Thread-376 {}] server.AbstractConnector(333): Started ServerConnector@7dfd7ecf{HTTP/1.1, (http/1.1)}{localhost:37305} 2024-12-11T22:56:20,120 INFO [Thread-376 {}] server.Server(415): Started @22362ms 2024-12-11T22:56:20,128 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73ea0d15{HTTP/1.1, (http/1.1)}{localhost:33061} 2024-12-11T22:56:20,128 INFO [Time-limited test {}] server.Server(415): Started @22369ms 2024-12-11T22:56:20,300 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:56:20,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741841_1017 (size=5) 2024-12-11T22:56:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741841_1017 (size=5) 2024-12-11T22:56:20,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741841_1017 (size=5) 2024-12-11T22:56:20,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:56:20,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-11T22:56:20,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T22:56:20,487 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T22:56:20,489 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T22:56:20,489 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-11T22:56:20,491 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:56:20,491 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-11T22:56:20,491 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T22:56:20,491 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-11T22:56:20,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:56:20,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-11T22:56:20,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T22:56:20,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T22:56:20,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T22:56:20,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T22:56:21,384 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T22:56:21,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:21,457 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
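
Annotation (not part of the log): the repeated AuthenticationFilter warning above only means the Hadoop HTTP servers in this mini cluster fall back to random signer secrets because /home/jenkins/hadoop-http-auth-signature-secret is not readable; for this test it is harmless noise. As an untested sketch of how a readable secret file could be supplied, assuming the property key follows Hadoop's AuthenticationFilterInitializer convention (the key name is an assumption, not confirmed by this log):

import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed property name; point it at a file the test user can actually read.
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/path/readable/by/the/test/user"); // placeholder path, not from this log
    System.out.println(conf.get("hadoop.http.authentication.signature.secret.file"));
  }
}
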
2024-12-11T22:56:21,458 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:21,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:21,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:21,518 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T22:56:21,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:21,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@647f66d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:21,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d9273c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:21,614 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T22:56:21,615 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T22:56:21,615 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T22:56:21,615 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T22:56:21,641 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:21,673 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:21,844 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:21,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d2db3c7{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-35901-hadoop-yarn-common-3_4_1_jar-_-any-15149676545920878706/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T22:56:21,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@4bf46fce{HTTP/1.1, (http/1.1)}{localhost:35901} 2024-12-11T22:56:21,860 INFO [Time-limited test {}] server.Server(415): Started @24101ms 2024-12-11T22:56:22,116 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T22:56:22,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:22,155 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T22:56:22,156 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:56:22,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:56:22,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:56:22,165 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:56:22,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:56:22,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6efd04a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:56:22,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e557003{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T22:56:22,252 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T22:56:22,253 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T22:56:22,253 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T22:56:22,253 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T22:56:22,262 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:22,268 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:22,380 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T22:56:22,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e88e382{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/java.io.tmpdir/jetty-localhost-40991-hadoop-yarn-common-3_4_1_jar-_-any-11749957502715212408/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T22:56:22,389 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f0170a{HTTP/1.1, (http/1.1)}{localhost:40991} 2024-12-11T22:56:22,389 INFO [Time-limited test {}] server.Server(415): Started @24630ms 2024-12-11T22:56:22,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-11T22:56:22,433 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:56:22,467 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=711, OpenFileDescriptor=759, MaxFileDescriptor=1048576, SystemLoadAverage=1220, ProcessCount=11, AvailableMemoryMB=2806 2024-12-11T22:56:22,469 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=711 is superior to 500 2024-12-11T22:56:22,474 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T22:56:22,479 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b762025f20c5,42043,1733957769583 2024-12-11T22:56:22,479 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6ee2bcf6 2024-12-11T22:56:22,479 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T22:56:22,483 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44084, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T22:56:22,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:56:22,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 
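
Annotation (not part of the log): with the mini MapReduce cluster up, testExportFileSystemStateWithSplitRegion starts by creating testtb-testExportFileSystemStateWithSplitRegion with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', BLOOMFILTER => 'ROW', VERSIONS => '1'), and the two regions created next ('' to '1' and '1' to '') imply a single split key of '1'. A rough, hedged client-side equivalent using the standard Admin/TableDescriptorBuilder API (not the test's actual code) would be:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MobSplitTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") }; // yields regions ''..'1' and '1'..''
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc, splitKeys);
    }
  }
}
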
2024-12-11T22:56:22,500 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:56:22,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-11T22:56:22,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:56:22,506 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:56:22,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741842_1018 (size=458) 2024-12-11T22:56:22,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741842_1018 (size=458) 2024-12-11T22:56:22,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741842_1018 (size=458) 2024-12-11T22:56:22,578 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3b4d716d5e3b511feeb6d02f44c1b924, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:22,588 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5de470ac35389e049b548004f0da5513, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:22,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=7 2024-12-11T22:56:22,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741843_1019 (size=83) 2024-12-11T22:56:22,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741843_1019 (size=83) 2024-12-11T22:56:22,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741843_1019 (size=83) 2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 3b4d716d5e3b511feeb6d02f44c1b924, disabling compactions & flushes 2024-12-11T22:56:22,669 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. after waiting 0 ms 2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:22,669 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
2024-12-11T22:56:22,669 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3b4d716d5e3b511feeb6d02f44c1b924: Waiting for close lock at 1733957782669Disabling compacts and flushes for region at 1733957782669Disabling writes for close at 1733957782669Writing region close event to WAL at 1733957782669Closed at 1733957782669 2024-12-11T22:56:22,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741844_1020 (size=83) 2024-12-11T22:56:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741844_1020 (size=83) 2024-12-11T22:56:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741844_1020 (size=83) 2024-12-11T22:56:22,714 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:22,715 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 5de470ac35389e049b548004f0da5513, disabling compactions & flushes 2024-12-11T22:56:22,715 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:22,717 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:22,717 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. after waiting 0 ms 2024-12-11T22:56:22,717 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:22,717 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 
2024-12-11T22:56:22,718 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5de470ac35389e049b548004f0da5513: Waiting for close lock at 1733957782715Disabling compacts and flushes for region at 1733957782715Disabling writes for close at 1733957782717 (+2 ms)Writing region close event to WAL at 1733957782717Closed at 1733957782717 2024-12-11T22:56:22,722 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:56:22,722 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733957782722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957782722"}]},"ts":"1733957782722"} 2024-12-11T22:56:22,723 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733957782722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957782722"}]},"ts":"1733957782722"} 2024-12-11T22:56:22,777 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T22:56:22,781 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:56:22,781 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957782781"}]},"ts":"1733957782781"} 2024-12-11T22:56:22,788 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T22:56:22,789 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:56:22,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:56:22,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:56:22,793 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:56:22,793 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:56:22,794 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, ASSIGN}] 2024-12-11T22:56:22,799 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, ASSIGN 2024-12-11T22:56:22,801 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, ASSIGN 2024-12-11T22:56:22,803 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:56:22,807 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T22:56:22,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:56:22,954 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
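The entries above trace CreateTableProcedure pid=7 through CREATE_TABLE_ADD_TO_META and CREATE_TABLE_ASSIGN_REGIONS for a table that is pre-split at key '1', which is why exactly two regions (startkey ''..'1' and '1'..'') are written to hbase:meta and handed to the two TransitRegionStateProcedure subprocedures. As a rough illustration only (the test's own setup code is not part of this log; the class name and descriptor details below are assumptions), an equivalently pre-split table could be created with the standard HBase Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper, not taken from the test source.
public class CreatePreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // Single column family 'cf', matching the store-open entries later in the log;
      // all other family attributes are left at their defaults in this sketch.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key, '1', yields the two regions visible above.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}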
2024-12-11T22:56:22,955 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3b4d716d5e3b511feeb6d02f44c1b924, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:56:22,955 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5de470ac35389e049b548004f0da5513, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:56:22,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, ASSIGN because future has completed 2024-12-11T22:56:22,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:56:22,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, ASSIGN because future has completed 2024-12-11T22:56:22,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:56:23,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:56:23,160 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:23,160 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 3b4d716d5e3b511feeb6d02f44c1b924, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:56:23,161 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. service=AccessControlService 2024-12-11T22:56:23,162 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:56:23,162 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:23,162 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,162 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:23,162 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,162 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,167 INFO [StoreOpener-3b4d716d5e3b511feeb6d02f44c1b924-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,177 INFO [StoreOpener-3b4d716d5e3b511feeb6d02f44c1b924-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3b4d716d5e3b511feeb6d02f44c1b924 columnFamilyName cf 2024-12-11T22:56:23,183 DEBUG [StoreOpener-3b4d716d5e3b511feeb6d02f44c1b924-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:23,184 INFO [StoreOpener-3b4d716d5e3b511feeb6d02f44c1b924-1 {}] regionserver.HStore(327): Store=3b4d716d5e3b511feeb6d02f44c1b924/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:23,185 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,187 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,188 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 
{event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,189 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,189 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,193 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,198 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57297, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:23,203 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:23,204 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 3b4d716d5e3b511feeb6d02f44c1b924; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70630145, jitterRate=0.05247117578983307}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:23,204 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 3b4d716d5e3b511feeb6d02f44c1b924: Running coprocessor pre-open hook at 1733957783162Writing region info on filesystem at 1733957783163 (+1 ms)Initializing all the Stores at 1733957783164 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957783165 (+1 ms)Cleaning up temporary data from old regions at 1733957783189 (+24 ms)Running coprocessor post-open hooks at 1733957783204 (+15 ms)Region opened successfully at 1733957783205 (+1 ms) 2024-12-11T22:56:23,208 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924., pid=10, masterSystemTime=1733957783147 2024-12-11T22:56:23,208 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:23,209 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 5de470ac35389e049b548004f0da5513, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:56:23,209 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. service=AccessControlService 2024-12-11T22:56:23,209 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:56:23,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:23,213 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,213 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,217 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:23,217 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
2024-12-11T22:56:23,232 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3b4d716d5e3b511feeb6d02f44c1b924, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:56:23,233 INFO [StoreOpener-5de470ac35389e049b548004f0da5513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,237 INFO [StoreOpener-5de470ac35389e049b548004f0da5513-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5de470ac35389e049b548004f0da5513 columnFamilyName cf 2024-12-11T22:56:23,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:56:23,238 DEBUG [StoreOpener-5de470ac35389e049b548004f0da5513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:23,239 INFO [StoreOpener-5de470ac35389e049b548004f0da5513-1 {}] regionserver.HStore(327): Store=5de470ac35389e049b548004f0da5513/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:23,241 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,243 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,244 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,245 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,245 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 5de470ac35389e049b548004f0da5513 
2024-12-11T22:56:23,249 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-12-11T22:56:23,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731 in 275 msec 2024-12-11T22:56:23,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, ASSIGN in 457 msec 2024-12-11T22:56:23,265 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:23,266 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 5de470ac35389e049b548004f0da5513; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64868169, jitterRate=-0.033388957381248474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:23,266 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,267 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 5de470ac35389e049b548004f0da5513: Running coprocessor pre-open hook at 1733957783214Writing region info on filesystem at 1733957783214Initializing all the Stores at 1733957783231 (+17 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957783231Cleaning up temporary data from old regions at 1733957783245 (+14 ms)Running coprocessor post-open hooks at 1733957783266 (+21 ms)Region opened successfully at 1733957783266 2024-12-11T22:56:23,269 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513., pid=11, masterSystemTime=1733957783161 2024-12-11T22:56:23,285 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 
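By this point both regions are open on their assigned region servers (3b4d716d5e3b511feeb6d02f44c1b924 on b762025f20c5,37563 and 5de470ac35389e049b548004f0da5513 on b762025f20c5,33867). A minimal sketch, assuming the public RegionLocator API (the helper name and printing are illustrative), of how a client could confirm where the regions landed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class PrintRegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(name)) {
      // Each HRegionLocation pairs a RegionInfo (encoded name, start/end key)
      // with the ServerName currently hosting that region.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}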
2024-12-11T22:56:23,286 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:23,292 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=5de470ac35389e049b548004f0da5513, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:56:23,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:56:23,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-12-11T22:56:23,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938 in 317 msec 2024-12-11T22:56:23,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-11T22:56:23,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, ASSIGN in 537 msec 2024-12-11T22:56:23,341 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:56:23,342 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957783341"}]},"ts":"1733957783341"} 2024-12-11T22:56:23,349 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T22:56:23,353 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:56:23,360 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T22:56:23,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:56:23,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52313, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,385 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:23,385 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:23,386 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53677, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:56:23,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,416 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:23,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-11T22:56:23,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:56:23,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:23,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:23,500 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:23,500 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:23,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:23,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:23,510 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 1.0140 sec 2024-12-11T22:56:23,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:56:23,646 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:23,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:23,657 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:23,658 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
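The PermissionStorage write and the ZKPermissionWatcher updates above record the table creator 'jenkins' being given RWXCA on the new table and that entry being propagated to every region server's permission cache through the /hbase/acl znode. In this log the grant is performed by the create-table post-operation hook, not by client code; purely for illustration, and assuming the AccessControlClient signature below is the one exposed by current client versions, an equivalent explicit grant would look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsSketch {
  // AccessControlClient.grant declares "throws Throwable".
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA == READ, WRITE, EXEC, CREATE, ADMIN, matching the
      // "jenkins: RWXCA" entry written to hbase:acl above.
      // family/qualifier are null, i.e. the grant covers the whole table.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}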
2024-12-11T22:56:23,659 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:56:23,662 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:23,680 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:23,686 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,692 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,695 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,697 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:23,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957783710 (current time:1733957783710). 
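The master has just received the snapshot request { ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }. A minimal sketch of how such a request is typically issued from the client side, assuming the standard Admin API (for an enabled table this overload requests a FLUSH-type snapshot, the type shown in the entry above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot completes or fails on the master side.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
    }
  }
}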
2024-12-11T22:56:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:56:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T22:56:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:56:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f319dd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:23,715 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:23,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:23,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:23,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4700879f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:23,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:23,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,717 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44104, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:23,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab77c14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:23,721 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:23,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,723 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:56:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,750 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T22:56:23,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34104400, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:23,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:23,755 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:23,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:23,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:23,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18d1a111, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:23,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:23,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,759 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:23,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@600668c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:23,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:23,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:23,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,768 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T22:56:23,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:56:23,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:23,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43214, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:23,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:56:23,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:23,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:23,780 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:56:23,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T22:56:23,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:56:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T22:56:23,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T22:56:23,802 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:56:23,810 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:56:23,837 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:56:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741845_1021 (size=215) 2024-12-11T22:56:23,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741845_1021 (size=215) 2024-12-11T22:56:23,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741845_1021 (size=215) 2024-12-11T22:56:23,890 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:56:23,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513}] 2024-12-11T22:56:23,900 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:23,903 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T22:56:24,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-11T22:56:24,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-11T22:56:24,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:24,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:24,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5de470ac35389e049b548004f0da5513: 2024-12-11T22:56:24,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 3b4d716d5e3b511feeb6d02f44c1b924: 2024-12-11T22:56:24,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T22:56:24,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-11T22:56:24,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:24,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:24,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:56:24,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:56:24,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T22:56:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741847_1023 (size=86) 2024-12-11T22:56:24,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741846_1022 (size=86) 2024-12-11T22:56:24,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741847_1023 (size=86) 2024-12-11T22:56:24,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741847_1023 (size=86) 2024-12-11T22:56:24,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741846_1022 (size=86) 2024-12-11T22:56:24,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:24,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741846_1022 (size=86) 2024-12-11T22:56:24,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
2024-12-11T22:56:24,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-11T22:56:24,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-11T22:56:24,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-11T22:56:24,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-11T22:56:24,137 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:24,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:24,137 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:24,137 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:24,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 in 245 msec 2024-12-11T22:56:24,145 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-11T22:56:24,146 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:56:24,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 in 245 msec 2024-12-11T22:56:24,149 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:56:24,153 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:56:24,154 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:56:24,155 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:24,156 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T22:56:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741848_1024 (size=78) 2024-12-11T22:56:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741848_1024 (size=78) 2024-12-11T22:56:24,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741848_1024 (size=78) 2024-12-11T22:56:24,205 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:56:24,205 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,208 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741849_1025 (size=713) 2024-12-11T22:56:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741849_1025 (size=713) 2024-12-11T22:56:24,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741849_1025 (size=713) 2024-12-11T22:56:24,255 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:56:24,274 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:56:24,276 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,280 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:56:24,280 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T22:56:24,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 489 msec 2024-12-11T22:56:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T22:56:24,426 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:24,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:56:24,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:56:24,459 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:24,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:24,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
2024-12-11T22:56:24,468 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:56:24,474 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:24,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:24,506 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:24,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957784512 (current time:1733957784512). 2024-12-11T22:56:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:56:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T22:56:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:56:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3259e7dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:24,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:24,514 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:24,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:24,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:24,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1af8ecb6, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:24,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:24,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,517 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44118, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:24,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d6d5386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:24,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:24,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:24,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:24,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
2024-12-11T22:56:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,524 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:56:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f5e68d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:24,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:24,526 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:24,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:24,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:24,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eeca96b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:24,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:24,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,529 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44140, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:24,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20edfc54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:24,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:24,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:24,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:24,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:56:24,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:24,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:24,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
2024-12-11T22:56:24,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:24,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:24,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T22:56:24,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:56:24,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T22:56:24,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:24,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T22:56:24,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T22:56:24,550 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:56:24,552 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:56:24,557 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:56:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741850_1026 (size=210) 2024-12-11T22:56:24,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741850_1026 (size=210) 2024-12-11T22:56:24,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741850_1026 (size=210) 2024-12-11T22:56:24,584 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:56:24,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513}] 2024-12-11T22:56:24,588 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:24,588 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:24,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T22:56:24,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-11T22:56:24,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-11T22:56:24,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:56:24,749 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 5de470ac35389e049b548004f0da5513 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T22:56:24,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:56:24,768 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 3b4d716d5e3b511feeb6d02f44c1b924 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T22:56:24,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 is 71, key is 00b95f404a8f56b5d05e85f5c0c90407/cf:q/1733957784449/Put/seqid=0 2024-12-11T22:56:24,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 is 71, key is 136ffd187f885c81b457aa575c829c06/cf:q/1733957784453/Put/seqid=0 2024-12-11T22:56:24,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T22:56:24,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741851_1027 (size=8241) 2024-12-11T22:56:24,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741851_1027 (size=8241) 2024-12-11T22:56:24,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741851_1027 (size=8241) 2024-12-11T22:56:24,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:24,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741852_1028 (size=5032) 2024-12-11T22:56:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741852_1028 (size=5032) 2024-12-11T22:56:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741852_1028 (size=5032) 2024-12-11T22:56:24,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:24,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:24,986 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 2024-12-11T22:56:24,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/.tmp/cf/968f6507c62d41fd9dc4e6223511e3e4, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=3b4d716d5e3b511feeb6d02f44c1b924] 2024-12-11T22:56:24,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/.tmp/cf/5276c58bd549450694c2a80aac16cb09, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=5de470ac35389e049b548004f0da5513] 2024-12-11T22:56:25,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/.tmp/cf/5276c58bd549450694c2a80aac16cb09 is 224, key is 176a8875a83ff92bf58fb43fb34797b52/cf:q/1733957784453/Put/seqid=0 2024-12-11T22:56:25,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/.tmp/cf/968f6507c62d41fd9dc4e6223511e3e4 is 224, key is 0e1b42e9794a67c7e23d812cf5d564756/cf:q/1733957784449/Put/seqid=0 2024-12-11T22:56:25,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741853_1029 (size=5754) 2024-12-11T22:56:25,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741853_1029 (size=5754) 2024-12-11T22:56:25,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741853_1029 (size=5754) 2024-12-11T22:56:25,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/.tmp/cf/968f6507c62d41fd9dc4e6223511e3e4 2024-12-11T22:56:25,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741854_1030 (size=15937) 2024-12-11T22:56:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741854_1030 (size=15937) 2024-12-11T22:56:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741854_1030 (size=15937) 2024-12-11T22:56:25,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/.tmp/cf/5276c58bd549450694c2a80aac16cb09 2024-12-11T22:56:25,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/.tmp/cf/968f6507c62d41fd9dc4e6223511e3e4 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4 2024-12-11T22:56:25,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/.tmp/cf/5276c58bd549450694c2a80aac16cb09 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09 2024-12-11T22:56:25,129 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4, entries=2, sequenceid=6, filesize=5.6 K 2024-12-11T22:56:25,134 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09, entries=48, sequenceid=6, filesize=15.6 K 2024-12-11T22:56:25,140 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 3b4d716d5e3b511feeb6d02f44c1b924 in 365ms, sequenceid=6, compaction requested=false 2024-12-11T22:56:25,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-11T22:56:25,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 3b4d716d5e3b511feeb6d02f44c1b924: 2024-12-11T22:56:25,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T22:56:25,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:25,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4] hfiles 2024-12-11T22:56:25,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 5de470ac35389e049b548004f0da5513 in 393ms, sequenceid=6, compaction requested=false 2024-12-11T22:56:25,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 5de470ac35389e049b548004f0da5513: 2024-12-11T22:56:25,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T22:56:25,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:25,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09] hfiles 2024-12-11T22:56:25,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T22:56:25,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741855_1031 (size=125) 2024-12-11T22:56:25,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741855_1031 (size=125) 2024-12-11T22:56:25,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741855_1031 (size=125) 2024-12-11T22:56:25,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 
2024-12-11T22:56:25,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-11T22:56:25,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-11T22:56:25,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:25,197 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 2024-12-11T22:56:25,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5de470ac35389e049b548004f0da5513 in 620 msec 2024-12-11T22:56:25,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741856_1032 (size=125) 2024-12-11T22:56:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741856_1032 (size=125) 2024-12-11T22:56:25,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741856_1032 (size=125) 2024-12-11T22:56:25,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 
2024-12-11T22:56:25,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-11T22:56:25,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-11T22:56:25,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:25,217 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:25,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-11T22:56:25,222 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:56:25,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924 in 634 msec 2024-12-11T22:56:25,224 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:56:25,231 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:56:25,232 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:56:25,232 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:25,235 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924] hfiles 2024-12-11T22:56:25,235 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 2024-12-11T22:56:25,236 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:56:25,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741857_1033 (size=309) 2024-12-11T22:56:25,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741857_1033 (size=309) 2024-12-11T22:56:25,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741857_1033 (size=309) 2024-12-11T22:56:25,275 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:56:25,275 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,277 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741858_1034 (size=1023) 2024-12-11T22:56:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41607 is added to blk_1073741858_1034 (size=1023) 2024-12-11T22:56:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741858_1034 (size=1023) 2024-12-11T22:56:25,355 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:56:25,386 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:56:25,387 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,390 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:56:25,390 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T22:56:25,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 844 msec 2024-12-11T22:56:25,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T22:56:25,686 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:25,722 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:25,724 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:25,728 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:25,729 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:56:25,730 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T22:56:25,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:25,734 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37563 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T22:56:25,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50544, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:56:25,813 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T22:56:25,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:56:25,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:25,823 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:56:25,823 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:25,828 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:56:25,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-11T22:56:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T22:56:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741859_1035 (size=390) 2024-12-11T22:56:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741859_1035 (size=390) 2024-12-11T22:56:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741859_1035 (size=390) 2024-12-11T22:56:25,903 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 433c5483abc2344dc88ce6d50b97ed48, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:25,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T22:56:25,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741860_1036 (size=75) 2024-12-11T22:56:25,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741860_1036 (size=75) 2024-12-11T22:56:25,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741860_1036 (size=75) 2024-12-11T22:56:25,951 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:25,951 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 433c5483abc2344dc88ce6d50b97ed48, disabling compactions & flushes 2024-12-11T22:56:25,952 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:25,952 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:25,952 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. after waiting 0 ms 2024-12-11T22:56:25,952 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:25,952 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 
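
The CreateTableProcedure entries above show the master creating 'testExportFileSystemStateWithSplitRegion' with a single family 'cf' (VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 64KB, no compression). Purely for orientation while reading this log, the client side of such a request looks roughly like the following hypothetical Java sketch; the class name and configuration source are placeholders and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // placeholder setup; reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirror the descriptor printed by HMaster$4 above: one family 'cf',
      // VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 64KB.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build();
      // Submits a CreateTableProcedure on the master, i.e. the pid=18 procedure seen in this log.
      admin.createTable(desc);
    }
  }
}
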
2024-12-11T22:56:25,952 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 433c5483abc2344dc88ce6d50b97ed48: Waiting for close lock at 1733957785951Disabling compacts and flushes for region at 1733957785951Disabling writes for close at 1733957785952 (+1 ms)Writing region close event to WAL at 1733957785952Closed at 1733957785952 2024-12-11T22:56:25,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:56:25,955 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733957785955"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957785955"}]},"ts":"1733957785955"} 2024-12-11T22:56:25,961 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T22:56:25,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:56:25,963 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957785963"}]},"ts":"1733957785963"} 2024-12-11T22:56:25,966 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T22:56:25,967 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:56:25,972 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:56:25,972 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:56:25,972 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:56:25,972 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:56:25,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, ASSIGN}] 2024-12-11T22:56:25,976 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, ASSIGN 2024-12-11T22:56:25,978 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T22:56:26,129 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T22:56:26,129 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=433c5483abc2344dc88ce6d50b97ed48, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:26,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, ASSIGN because future has completed 2024-12-11T22:56:26,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:56:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T22:56:26,297 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:26,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 433c5483abc2344dc88ce6d50b97ed48, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:56:26,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. service=AccessControlService 2024-12-11T22:56:26,297 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
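
The repeated "Checking to see if procedure is done pid=18" lines are the client polling the master while the create-table procedure and its ASSIGN subprocedures (pid=19, pid=20) run. With the asynchronous admin call that polling is hidden behind a Future; the hypothetical sketch below only illustrates the pattern, and the two-minute timeout is an arbitrary example value, not one used by this test.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateTableAsyncSketch {
  // Same Admin and TableDescriptor as in the previous sketch.
  static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
    // createTableAsync returns once the master has stored the procedure (cf. "Stored pid=18" above);
    // waiting on the Future corresponds to the "Checking to see if procedure is done" polling in this log.
    admin.createTableAsync(desc).get(2, TimeUnit.MINUTES);
  }
}
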
2024-12-11T22:56:26,298 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,298 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:26,298 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,298 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,300 INFO [StoreOpener-433c5483abc2344dc88ce6d50b97ed48-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,302 INFO [StoreOpener-433c5483abc2344dc88ce6d50b97ed48-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 433c5483abc2344dc88ce6d50b97ed48 columnFamilyName cf 2024-12-11T22:56:26,302 DEBUG [StoreOpener-433c5483abc2344dc88ce6d50b97ed48-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:26,303 INFO [StoreOpener-433c5483abc2344dc88ce6d50b97ed48-1 {}] regionserver.HStore(327): Store=433c5483abc2344dc88ce6d50b97ed48/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:26,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,304 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,304 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,305 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,305 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,307 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,309 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:56:26,310 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 433c5483abc2344dc88ce6d50b97ed48; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63205267, jitterRate=-0.05816812813282013}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:26,310 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:26,311 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 433c5483abc2344dc88ce6d50b97ed48: Running coprocessor pre-open hook at 1733957786298Writing region info on filesystem at 1733957786298Initializing all the Stores at 1733957786299 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957786299Cleaning up temporary data from old regions at 1733957786305 (+6 ms)Running coprocessor post-open hooks at 1733957786310 (+5 ms)Region opened successfully at 1733957786311 (+1 ms) 2024-12-11T22:56:26,313 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., pid=20, masterSystemTime=1733957786291 2024-12-11T22:56:26,316 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:26,316 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 
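
Once the open journal above is written, the single region of the new table is deployed on b762025f20c5,41635. As an illustration only (this is not part of the test code), a client can confirm where a table's regions landed via the RegionLocator API; the sketch assumes the same Connection as in the earlier sketch.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  static void printLocations(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      // For the freshly created table this yields one region with empty start/end keys,
      // hosted on the server that just logged "Opened ..." above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
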
2024-12-11T22:56:26,317 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=433c5483abc2344dc88ce6d50b97ed48, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:26,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:56:26,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-11T22:56:26,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023 in 188 msec 2024-12-11T22:56:26,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-11T22:56:26,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, ASSIGN in 353 msec 2024-12-11T22:56:26,330 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:56:26,330 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957786330"}]},"ts":"1733957786330"} 2024-12-11T22:56:26,333 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T22:56:26,334 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:56:26,334 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T22:56:26,338 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T22:56:26,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:26,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:26,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:26,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, 
quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:56:26,359 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,359 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:56:26,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 540 msec 2024-12-11T22:56:26,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T22:56:26,456 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:26,457 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:26,460 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:56:28,667 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:56:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T22:56:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T22:56:29,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T22:56:30,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:30,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T22:56:30,121 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:30,121 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T22:56:31,528 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-11T22:56:37,501 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:38920 [Receiving block BP-1399083668-172.17.0.2-1733957762347:blk_1073741862_1038] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 5285ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/, blockId=1073741862, seqno=4016 2024-12-11T22:56:37,501 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:45526 [Receiving block BP-1399083668-172.17.0.2-1733957762347:blk_1073741862_1038] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 5281ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/, blockId=1073741862, seqno=4015 2024-12-11T22:56:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T22:56:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T22:56:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39843 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T22:56:38,049 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733957786466/Put/seqid=0 2024-12-11T22:56:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T22:56:38,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T22:56:38,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T22:56:38,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c12dab9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:38,165 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:38,165 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:38,167 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:38,168 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:38,168 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:38,169 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e91c26c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:38,169 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:38,169 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:38,169 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:38,172 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:38,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18f00c1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:38,175 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location 
from registry 2024-12-11T22:56:38,178 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:38,179 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:38,181 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:38,195 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:45807/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-11T22:56:38,196 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T22:56:38,198 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is b762025f20c5,42043,1733957769583 2024-12-11T22:56:38,198 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7256331 2024-12-11T22:56:38,198 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T22:56:38,201 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37066, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T22:56:38,209 WARN [IPC Server handler 2 on default port 45807 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T22:56:38,215 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2] 2024-12-11T22:56:38,226 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:38,243 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:45807/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-11T22:56:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-11T22:56:38,274 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40109, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-11T22:56:38,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:?] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:?] 2024-12-11T22:56:38,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:40109 deadline: 1733957858274, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-11T22:56:38,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:?] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:?] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:?] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:56:38,285 WARN [IPC Server handler 2 on default port 45807 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T22:56:38,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:45807/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file for inclusion in 433c5483abc2344dc88ce6d50b97ed48/cf 2024-12-11T22:56:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-11T22:56:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-11T22:56:38,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:45807/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-11T22:56:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(2603): Flush status journal for 433c5483abc2344dc88ce6d50b97ed48: 2024-12-11T22:56:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:45807/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/output/cf/test_file to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/staging/jenkins__testExportFileSystemStateWithSplitRegion__28s1g7fmjtlgep4c00m1t9cpjnpv3frifkhk3c18dp7uqa0e4t992sjhcp92jm4c/cf/test_file 2024-12-11T22:56:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/staging/jenkins__testExportFileSystemStateWithSplitRegion__28s1g7fmjtlgep4c00m1t9cpjnpv3frifkhk3c18dp7uqa0e4t992sjhcp92jm4c/cf/test_file as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ 2024-12-11T22:56:38,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/staging/jenkins__testExportFileSystemStateWithSplitRegion__28s1g7fmjtlgep4c00m1t9cpjnpv3frifkhk3c18dp7uqa0e4t992sjhcp92jm4c/cf/test_file into 433c5483abc2344dc88ce6d50b97ed48/cf as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ - updating store file list. 
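
The entries above show the server side of a secure bulk load: test_file is validated against the region bounds, moved into the staging directory, and committed into 433c5483abc2344dc88ce6d50b97ed48/cf. On the client side this is driven by BulkLoadHFilesTool (visible in the call stack a little further down). A minimal, hypothetical invocation would look like the sketch below; the output directory path is a placeholder, not the path from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Directory layout expected by the tool: <dir>/<family>/<hfile>, e.g. .../output/cf/test_file as in this log.
    Path hfileDir = new Path("/path/to/output"); // placeholder
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    new BulkLoadHFilesTool(conf).bulkLoad(table, hfileDir);
  }
}
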
2024-12-11T22:56:38,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 29c70957a4e4481884ed3c088884cae5_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T22:56:38,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ into 433c5483abc2344dc88ce6d50b97ed48/cf 2024-12-11T22:56:38,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/staging/jenkins__testExportFileSystemStateWithSplitRegion__28s1g7fmjtlgep4c00m1t9cpjnpv3frifkhk3c18dp7uqa0e4t992sjhcp92jm4c/cf/test_file into 433c5483abc2344dc88ce6d50b97ed48/cf (new location: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_) 2024-12-11T22:56:38,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/staging/jenkins__testExportFileSystemStateWithSplitRegion__28s1g7fmjtlgep4c00m1t9cpjnpv3frifkhk3c18dp7uqa0e4t992sjhcp92jm4c/cf/test_file 2024-12-11T22:56:38,385 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-11T22:56:38,385 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:56:38,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:38,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:38,386 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
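The call stack above shows the test driving this load through BulkLoadHFilesTool.bulkLoad from TestExportSnapshot.testExportFileSystemStateWithSplitRegion. As a rough client-side sketch of the same operation using the public BulkLoadHFiles API (the HDFS path below is a placeholder, not the exact Jenkins test directory from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The directory must be laid out as <dir>/<columnFamily>/<hfile>, matching the
    // .../output/cf/test_file layout seen above; this path is a placeholder.
    Path hfileDir = new Path("hdfs://localhost:45807/path/to/output");
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    // Hands the HFiles to the region servers atomically; on the server side this
    // produces the HStore "Validating hfile ..." / "Loaded HFile ..." entries above.
    BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
  }
}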
2024-12-11T22:56:38,386 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:38,388 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=b762025f20c5:41635 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T22:56:38,388 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T22:56:38,389 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2 from cache 2024-12-11T22:56:38,389 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-11T22:56:38,394 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48., hostname=b762025f20c5,41635,1733957771023, seqNum=2] 2024-12-11T22:56:38,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 
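The HMaster entry above records a client-requested split of the table's single region. A minimal sketch of issuing such a request through the Admin API follows; passing the explicit split point "5" is an assumption made here for illustration (the daughter boundaries later in the log are ['', '5') and ['5', '')), and the request could equally have let the region server pick its own midkey.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to split the table; the explicit split point "5" is an
      // assumption for illustration. The master then queues a
      // SplitTableRegionProcedure like pid=21 below.
      admin.split(TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          Bytes.toBytes("5"));
    }
  }
}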
2024-12-11T22:56:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b762025f20c5,41635,1733957771023 2024-12-11T22:56:38,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=433c5483abc2344dc88ce6d50b97ed48, daughterA=19b26f780d95a771738dd9b93c240d96, daughterB=fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:38,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=433c5483abc2344dc88ce6d50b97ed48, daughterA=19b26f780d95a771738dd9b93c240d96, daughterB=fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:38,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=433c5483abc2344dc88ce6d50b97ed48, daughterA=19b26f780d95a771738dd9b93c240d96, daughterB=fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:38,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=433c5483abc2344dc88ce6d50b97ed48, daughterA=19b26f780d95a771738dd9b93c240d96, daughterB=fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:38,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T22:56:38,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, UNASSIGN}] 2024-12-11T22:56:38,429 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, UNASSIGN 2024-12-11T22:56:38,431 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=433c5483abc2344dc88ce6d50b97ed48, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:38,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, UNASSIGN because future has completed 2024-12-11T22:56:38,435 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T22:56:38,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:56:38,497 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b762025f20c5:41635 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:?] ... 34 more 2024-12-11T22:56:38,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T22:56:38,607 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:38,608 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T22:56:38,609 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 433c5483abc2344dc88ce6d50b97ed48, disabling compactions & flushes 2024-12-11T22:56:38,609 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:38,609 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:38,609 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. after waiting 0 ms 2024-12-11T22:56:38,609 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 2024-12-11T22:56:38,662 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-11T22:56:38,666 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:56:38,667 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48. 
2024-12-11T22:56:38,667 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 433c5483abc2344dc88ce6d50b97ed48: Waiting for close lock at 1733957798609Running coprocessor pre-close hooks at 1733957798609Disabling compacts and flushes for region at 1733957798609Disabling writes for close at 1733957798609Writing region close event to WAL at 1733957798636 (+27 ms)Running coprocessor post-close hooks at 1733957798663 (+27 ms)Closed at 1733957798667 (+4 ms) 2024-12-11T22:56:38,679 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T22:56:38,680 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:38,681 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=433c5483abc2344dc88ce6d50b97ed48, regionState=CLOSED 2024-12-11T22:56:38,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:56:38,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-11T22:56:38,694 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 433c5483abc2344dc88ce6d50b97ed48, server=b762025f20c5,41635,1733957771023 in 251 msec 2024-12-11T22:56:38,700 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-11T22:56:38,700 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=433c5483abc2344dc88ce6d50b97ed48, UNASSIGN in 266 msec 2024-12-11T22:56:38,724 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:38,730 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=433c5483abc2344dc88ce6d50b97ed48, threads=1 2024-12-11T22:56:38,732 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ for region: 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:38,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T22:56:38,758 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 29c70957a4e4481884ed3c088884cae5_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T22:56:38,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39843 is added to blk_1073741864_1040 (size=21) 2024-12-11T22:56:38,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741864_1040 (size=21) 2024-12-11T22:56:38,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741864_1040 (size=21) 2024-12-11T22:56:38,790 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 29c70957a4e4481884ed3c088884cae5_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T22:56:38,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741865_1041 (size=21) 2024-12-11T22:56:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741865_1041 (size=21) 2024-12-11T22:56:38,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741865_1041 (size=21) 2024-12-11T22:56:38,814 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ for region: 433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:56:38,816 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 433c5483abc2344dc88ce6d50b97ed48 Daughter A: [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48] storefiles, Daughter B: [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48] storefiles. 
2024-12-11T22:56:38,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741866_1042 (size=76) 2024-12-11T22:56:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741866_1042 (size=76) 2024-12-11T22:56:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741866_1042 (size=76) 2024-12-11T22:56:38,834 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:38,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741867_1043 (size=76) 2024-12-11T22:56:38,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741867_1043 (size=76) 2024-12-11T22:56:38,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741867_1043 (size=76) 2024-12-11T22:56:38,860 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:38,873 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T22:56:38,876 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T22:56:38,879 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733957798879"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733957798879"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733957798879"}]},"ts":"1733957798879"} 2024-12-11T22:56:38,880 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733957798879"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957798879"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733957798879"}]},"ts":"1733957798879"} 2024-12-11T22:56:38,880 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733957798879"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957798879"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733957798879"}]},"ts":"1733957798879"} 2024-12-11T22:56:38,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, ASSIGN}] 2024-12-11T22:56:38,897 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, ASSIGN 2024-12-11T22:56:38,897 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, ASSIGN 2024-12-11T22:56:38,898 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, ASSIGN; state=SPLITTING_NEW, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T22:56:38,899 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, ASSIGN; state=SPLITTING_NEW, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T22:56:39,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T22:56:39,049 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T22:56:39,049 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=19b26f780d95a771738dd9b93c240d96, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:39,049 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=fa6198c2afe57e993d9d453d55d14dcf, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:39,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, ASSIGN because future has completed 2024-12-11T22:56:39,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:56:39,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, ASSIGN because future has completed 2024-12-11T22:56:39,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:56:39,210 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:56:39,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 19b26f780d95a771738dd9b93c240d96, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.', STARTKEY => '', ENDKEY => '5'} 2024-12-11T22:56:39,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. service=AccessControlService 2024-12-11T22:56:39,210 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:56:39,211 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,211 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:39,211 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,211 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,213 INFO [StoreOpener-19b26f780d95a771738dd9b93c240d96-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,214 INFO [StoreOpener-19b26f780d95a771738dd9b93c240d96-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 19b26f780d95a771738dd9b93c240d96 columnFamilyName cf 2024-12-11T22:56:39,214 DEBUG [StoreOpener-19b26f780d95a771738dd9b93c240d96-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:39,227 DEBUG [StoreFileOpener-19b26f780d95a771738dd9b93c240d96-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48: NONE, but ROW specified in column family configuration 2024-12-11T22:56:39,242 DEBUG [StoreOpener-19b26f780d95a771738dd9b93c240d96-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_-bottom 2024-12-11T22:56:39,243 INFO [StoreOpener-19b26f780d95a771738dd9b93c240d96-1 {}] regionserver.HStore(327): Store=19b26f780d95a771738dd9b93c240d96/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:39,244 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,245 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,247 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,248 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,248 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,251 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,253 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 19b26f780d95a771738dd9b93c240d96; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72658933, jitterRate=0.08270247280597687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:39,253 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,254 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 19b26f780d95a771738dd9b93c240d96: Running coprocessor pre-open hook at 1733957799211Writing region info on filesystem at 1733957799211Initializing all the Stores at 1733957799213 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957799213Cleaning up temporary data from old regions at 1733957799248 (+35 ms)Running coprocessor post-open hooks at 1733957799253 (+5 ms)Region opened successfully at 1733957799254 (+1 ms) 2024-12-11T22:56:39,255 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96., pid=26, masterSystemTime=1733957799205 2024-12-11T22:56:39,255 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.,because compaction is disabled. 2024-12-11T22:56:39,258 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:56:39,258 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:56:39,258 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:56:39,258 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => fa6198c2afe57e993d9d453d55d14dcf, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.', STARTKEY => '5', ENDKEY => ''} 2024-12-11T22:56:39,259 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. service=AccessControlService 2024-12-11T22:56:39,259 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:56:39,259 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=19b26f780d95a771738dd9b93c240d96, regionState=OPEN, openSeqNum=7, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:39,259 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,259 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:56:39,259 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,259 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,261 INFO [StoreOpener-fa6198c2afe57e993d9d453d55d14dcf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:56:39,263 INFO [StoreOpener-fa6198c2afe57e993d9d453d55d14dcf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa6198c2afe57e993d9d453d55d14dcf columnFamilyName cf 2024-12-11T22:56:39,263 DEBUG [StoreOpener-fa6198c2afe57e993d9d453d55d14dcf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:39,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-11T22:56:39,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023 in 212 msec 2024-12-11T22:56:39,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, ASSIGN in 373 msec 2024-12-11T22:56:39,279 DEBUG [StoreFileOpener-fa6198c2afe57e993d9d453d55d14dcf-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48: NONE, but ROW specified in column family configuration 2024-12-11T22:56:39,281 DEBUG [StoreOpener-fa6198c2afe57e993d9d453d55d14dcf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_-top 2024-12-11T22:56:39,282 INFO [StoreOpener-fa6198c2afe57e993d9d453d55d14dcf-1 {}] regionserver.HStore(327): Store=fa6198c2afe57e993d9d453d55d14dcf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:56:39,282 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,283 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,284 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,285 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,285 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,287 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,288 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened fa6198c2afe57e993d9d453d55d14dcf; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63728576, jitterRate=-0.050370216369628906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:56:39,288 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,289 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): 
Region open journal for fa6198c2afe57e993d9d453d55d14dcf: Running coprocessor pre-open hook at 1733957799259Writing region info on filesystem at 1733957799260 (+1 ms)Initializing all the Stores at 1733957799261 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957799261Cleaning up temporary data from old regions at 1733957799285 (+24 ms)Running coprocessor post-open hooks at 1733957799288 (+3 ms)Region opened successfully at 1733957799289 (+1 ms) 2024-12-11T22:56:39,290 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf., pid=27, masterSystemTime=1733957799205 2024-12-11T22:56:39,290 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.,because compaction is disabled. 2024-12-11T22:56:39,293 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:56:39,293 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 
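With both daughter regions now open on b762025f20c5,41635, a client could confirm the post-split layout by listing the table's region locations. This is only an illustrative sketch using the standard RegionLocator API, not something the test is shown doing in this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListDaughterRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
      // After the split there should be two regions, ['', '5') and ['5', ''),
      // both hosted on the same region server as the parent.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName()
            + " start=" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + " end=" + Bytes.toStringBinary(loc.getRegion().getEndKey())
            + " on " + loc.getServerName());
      }
    }
  }
}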
2024-12-11T22:56:39,294 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=fa6198c2afe57e993d9d453d55d14dcf, regionState=OPEN, openSeqNum=7, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:56:39,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:56:39,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-11T22:56:39,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, server=b762025f20c5,41635,1733957771023 in 244 msec 2024-12-11T22:56:39,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-11T22:56:39,305 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, ASSIGN in 407 msec 2024-12-11T22:56:39,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=433c5483abc2344dc88ce6d50b97ed48, daughterA=19b26f780d95a771738dd9b93c240d96, daughterB=fa6198c2afe57e993d9d453d55d14dcf in 892 msec 2024-12-11T22:56:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T22:56:39,555 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:39,555 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T22:56:39,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:39,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957799561 (current time:1733957799561). 
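The master has just accepted a FLUSH-type snapshot request named snapshot-testExportFileSystemStateWithSplitRegion. A hedged sketch of submitting an equivalent request from a client via the Admin API (connection setup assumed, not shown in the log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Mirrors the request logged above: a FLUSH-type snapshot of the split table.
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH);
    }
  }
}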
2024-12-11T22:56:39,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:56:39,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T22:56:39,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:56:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502ec941, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:39,563 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:39,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:39,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:39,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41a3598a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:39,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:39,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,566 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:39,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cd46b90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:39,568 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:39,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:39,569 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47808, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:39,571 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:56:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,571 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T22:56:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f72f05b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:56:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:56:39,573 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:56:39,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:56:39,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:56:39,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f8095df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:56:39,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:56:39,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,575 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:56:39,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc1f92d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:56:39,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:56:39,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:56:39,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:39,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T22:56:39,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:56:39,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:56:39,585 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:56:39,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:56:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:56:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:56:39,587 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:56:39,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T22:56:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:56:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T22:56:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T22:56:39,590 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:56:39,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T22:56:39,592 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:56:39,595 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:56:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741868_1044 (size=197) 2024-12-11T22:56:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741868_1044 (size=197) 2024-12-11T22:56:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741868_1044 (size=197) 2024-12-11T22:56:39,606 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:56:39,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19b26f780d95a771738dd9b93c240d96}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa6198c2afe57e993d9d453d55d14dcf}] 2024-12-11T22:56:39,610 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:39,610 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T22:56:39,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-11T22:56:39,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-11T22:56:39,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:56:39,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for fa6198c2afe57e993d9d453d55d14dcf: 2024-12-11T22:56:39,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:56:39,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 19b26f780d95a771738dd9b93c240d96: 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_-bottom] hfiles 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_-top] hfiles 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:39,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:39,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741870_1046 
(size=182) 2024-12-11T22:56:39,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741870_1046 (size=182) 2024-12-11T22:56:39,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741870_1046 (size=182) 2024-12-11T22:56:39,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:56:39,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-11T22:56:39,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-11T22:56:39,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,795 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:56:39,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741869_1045 (size=182) 2024-12-11T22:56:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741869_1045 (size=182) 2024-12-11T22:56:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741869_1045 (size=182) 2024-12-11T22:56:39,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 19b26f780d95a771738dd9b93c240d96 in 194 msec 2024-12-11T22:56:39,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T22:56:40,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 
2024-12-11T22:56:40,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-11T22:56:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-11T22:56:40,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:40,200 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:56:40,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-11T22:56:40,206 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:56:40,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fa6198c2afe57e993d9d453d55d14dcf in 596 msec 2024-12-11T22:56:40,208 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T22:56:40,208 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:56:40,208 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:56:40,210 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_] hfiles 2024-12-11T22:56:40,210 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ 2024-12-11T22:56:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T22:56:40,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741871_1047 (size=129) 2024-12-11T22:56:40,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741871_1047 (size=129) 2024-12-11T22:56:40,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741871_1047 (size=129) 2024-12-11T22:56:40,240 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot 
region={ENCODED => 433c5483abc2344dc88ce6d50b97ed48, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,243 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:56:40,245 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:56:40,245 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,246 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741872_1048 (size=891) 2024-12-11T22:56:40,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741872_1048 (size=891) 2024-12-11T22:56:40,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741872_1048 (size=891) 2024-12-11T22:56:40,314 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:56:40,333 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:56:40,334 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,339 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute 
state=SNAPSHOT_POST_OPERATION 2024-12-11T22:56:40,339 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T22:56:40,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 751 msec 2024-12-11T22:56:40,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T22:56:40,725 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:56:40,726 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725 2024-12-11T22:56:40,726 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:40,784 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:56:40,784 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,790 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
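The export destination and the ExportSnapshot parameters dumped above (inputRoot, outputRoot, skipTmp=false) correspond to running the ExportSnapshot tool against this cluster. A minimal sketch follows, assuming the documented command-line options and a single mapper; the test itself drives the tool programmatically with its own arguments, and only the snapshot name and target path are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus every referenced hfile to the target
    // filesystem with a MapReduce job, mirroring the steps traced below.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://localhost:45807/user/jenkins/test-data/"
            + "2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725",
        "-mappers", "1"  // the log later reports a single export split
    });
    System.exit(rc);
  }
}

Everything that follows in the log (copying the snapshot manifest, resolving dependency jars, and submitting appattempt_1733957780200_0001) is this tool preparing and running that MapReduce copy job.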
2024-12-11T22:56:40,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:56:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741873_1049 (size=197) 2024-12-11T22:56:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741873_1049 (size=197) 2024-12-11T22:56:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741873_1049 (size=197) 2024-12-11T22:56:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741874_1050 (size=891) 2024-12-11T22:56:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741874_1050 (size=891) 2024-12-11T22:56:40,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741874_1050 (size=891) 2024-12-11T22:56:40,871 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:40,871 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:40,872 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-8695631574026656799.jar 2024-12-11T22:56:42,007 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-14458408332152737204.jar 
2024-12-11T22:56:42,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,077 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,077 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,078 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:56:42,078 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T22:56:42,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T22:56:42,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T22:56:42,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T22:56:42,080 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T22:56:42,080 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T22:56:42,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T22:56:42,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T22:56:42,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T22:56:42,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T22:56:42,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T22:56:42,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:56:42,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:56:42,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:56:42,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:56:42,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:56:42,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:56:42,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:56:42,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741875_1051 (size=131440) 2024-12-11T22:56:42,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741875_1051 (size=131440) 2024-12-11T22:56:42,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741875_1051 (size=131440) 2024-12-11T22:56:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741876_1052 (size=4188619) 2024-12-11T22:56:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741876_1052 (size=4188619) 2024-12-11T22:56:42,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741876_1052 (size=4188619) 2024-12-11T22:56:42,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741877_1053 (size=1323991) 2024-12-11T22:56:42,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741877_1053 (size=1323991) 2024-12-11T22:56:42,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741877_1053 (size=1323991) 2024-12-11T22:56:42,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741878_1054 (size=6426222) 2024-12-11T22:56:42,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741878_1054 (size=6426222) 2024-12-11T22:56:42,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741878_1054 (size=6426222) 2024-12-11T22:56:42,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741879_1055 (size=903541) 2024-12-11T22:56:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741879_1055 (size=903541) 2024-12-11T22:56:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741879_1055 (size=903541) 2024-12-11T22:56:42,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T22:56:42,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T22:56:42,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T22:56:42,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741881_1057 (size=443172) 2024-12-11T22:56:42,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741881_1057 (size=443172) 2024-12-11T22:56:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741881_1057 (size=443172) 2024-12-11T22:56:42,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741882_1058 (size=1877034) 2024-12-11T22:56:42,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741882_1058 (size=1877034) 2024-12-11T22:56:42,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741882_1058 (size=1877034) 2024-12-11T22:56:42,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741883_1059 (size=77835) 2024-12-11T22:56:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741883_1059 (size=77835) 2024-12-11T22:56:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741883_1059 (size=77835) 2024-12-11T22:56:42,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741884_1060 (size=30949) 2024-12-11T22:56:42,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741884_1060 (size=30949) 2024-12-11T22:56:42,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741884_1060 (size=30949) 2024-12-11T22:56:42,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741885_1061 (size=1597213) 2024-12-11T22:56:42,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741885_1061 (size=1597213) 2024-12-11T22:56:42,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741885_1061 (size=1597213) 2024-12-11T22:56:44,208 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:56:46,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741886_1062 (size=4695811) 2024-12-11T22:56:46,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741886_1062 (size=4695811) 2024-12-11T22:56:46,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741886_1062 (size=4695811) 2024-12-11T22:56:46,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741887_1063 (size=232957) 2024-12-11T22:56:46,217 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741887_1063 (size=232957) 2024-12-11T22:56:46,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741887_1063 (size=232957) 2024-12-11T22:56:46,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741888_1064 (size=127628) 2024-12-11T22:56:46,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741888_1064 (size=127628) 2024-12-11T22:56:46,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741888_1064 (size=127628) 2024-12-11T22:56:46,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741889_1065 (size=20406) 2024-12-11T22:56:46,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741889_1065 (size=20406) 2024-12-11T22:56:46,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741889_1065 (size=20406) 2024-12-11T22:56:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741890_1066 (size=5175431) 2024-12-11T22:56:46,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741890_1066 (size=5175431) 2024-12-11T22:56:46,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741890_1066 (size=5175431) 2024-12-11T22:56:46,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741891_1067 (size=217634) 2024-12-11T22:56:46,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741891_1067 (size=217634) 2024-12-11T22:56:46,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741891_1067 (size=217634) 2024-12-11T22:56:48,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T22:56:48,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T22:56:48,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T22:56:48,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741893_1069 (size=322274) 2024-12-11T22:56:48,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741893_1069 (size=322274) 2024-12-11T22:56:48,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741893_1069 (size=322274) 2024-12-11T22:56:48,130 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741894_1070 (size=503880) 2024-12-11T22:56:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741894_1070 (size=503880) 2024-12-11T22:56:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741894_1070 (size=503880) 2024-12-11T22:56:48,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741895_1071 (size=29229) 2024-12-11T22:56:48,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741895_1071 (size=29229) 2024-12-11T22:56:48,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741895_1071 (size=29229) 2024-12-11T22:56:48,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741896_1072 (size=24096) 2024-12-11T22:56:48,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741896_1072 (size=24096) 2024-12-11T22:56:48,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741896_1072 (size=24096) 2024-12-11T22:56:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741897_1073 (size=111872) 2024-12-11T22:56:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741897_1073 (size=111872) 2024-12-11T22:56:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741897_1073 (size=111872) 2024-12-11T22:56:48,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741898_1074 (size=45609) 2024-12-11T22:56:48,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741898_1074 (size=45609) 2024-12-11T22:56:48,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741898_1074 (size=45609) 2024-12-11T22:56:48,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741899_1075 (size=136454) 2024-12-11T22:56:48,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741899_1075 (size=136454) 2024-12-11T22:56:48,303 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
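The long run of "For class X, using jar Y" DEBUG lines above comes from TableMapReduceUtil resolving a containing jar for every class the export job needs so those jars can be shipped with the job. A minimal sketch of the generic call that produces such output is below; the class and job names are hypothetical, and only the behavior is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-demo");  // hypothetical job name
    // Resolves the jar for each required HBase/Hadoop class and adds it to the
    // job's distributed cache, emitting "For class ..., using jar ..." lines.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}

The "No job jar file set" warning above is typical when the job's classes come from build output directories instead of a packaged jar, as in this test run; the dependency jars added here are what the map tasks actually use.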
2024-12-11T22:56:48,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741899_1075 (size=136454) 2024-12-11T22:56:48,309 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-11T22:56:48,318 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=433c5483abc2344dc88ce6d50b97ed48-29c70957a4e4481884ed3c088884cae5_SeqId_4_. 2024-12-11T22:56:48,319 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=433c5483abc2344dc88ce6d50b97ed48-29c70957a4e4481884ed3c088884cae5_SeqId_4_. 2024-12-11T22:56:48,320 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-11T22:56:48,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741900_1076 (size=244) 2024-12-11T22:56:48,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741900_1076 (size=244) 2024-12-11T22:56:48,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741900_1076 (size=244) 2024-12-11T22:56:48,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741901_1077 (size=17) 2024-12-11T22:56:48,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741901_1077 (size=17) 2024-12-11T22:56:48,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741901_1077 (size=17) 2024-12-11T22:56:48,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741902_1078 (size=304052) 2024-12-11T22:56:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741902_1078 (size=304052) 2024-12-11T22:56:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741902_1078 (size=304052) 2024-12-11T22:56:51,403 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:56:51,403 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:56:51,576 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0001_000001 (auth:SIMPLE) from 127.0.0.1:50984 2024-12-11T22:56:56,761 INFO [master/b762025f20c5:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
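The two capacity-scheduler warnings above refer to the YARN setting yarn.scheduler.capacity.maximum-am-resource-percent; the scheduler skips enforcement so the single ApplicationMaster can still start. A hedged sketch of raising it for a small test cluster follows; the class name is illustrative and the chosen value is an assumption, not something taken from this run.

import org.apache.hadoop.conf.Configuration;

public class CapacitySchedulerTweakSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow up to 100% of queue capacity to be used by ApplicationMasters so a
    // single AM can always start in a small test cluster (the default is 0.1).
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 1.0f);
    System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}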
2024-12-11T22:56:56,761 INFO [master/b762025f20c5:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-11T22:57:01,713 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0001_000001 (auth:SIMPLE) from 127.0.0.1:41346 2024-12-11T22:57:02,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741903_1079 (size=349750) 2024-12-11T22:57:02,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741903_1079 (size=349750) 2024-12-11T22:57:02,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741903_1079 (size=349750) 2024-12-11T22:57:04,083 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0001_000001 (auth:SIMPLE) from 127.0.0.1:35640 2024-12-11T22:57:08,162 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3b4d716d5e3b511feeb6d02f44c1b924, had cached 0 bytes from a total of 5754 2024-12-11T22:57:08,211 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5de470ac35389e049b548004f0da5513, had cached 0 bytes from a total of 15937 2024-12-11T22:57:08,679 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T22:57:15,307 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5de470ac35389e049b548004f0da5513 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:57:15,308 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 83c7242f4212a8a5171da84f96da9bc6 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:57:15,308 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3b4d716d5e3b511feeb6d02f44c1b924 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:57:24,211 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 19b26f780d95a771738dd9b93c240d96, had cached 0 bytes from a total of 320414712 2024-12-11T22:57:24,259 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa6198c2afe57e993d9d453d55d14dcf, had cached 0 bytes from a total of 320414712 2024-12-11T22:57:36,621 WARN [DataXceiver for client DFSClient_attempt_1733957780200_0001_m_000000_0_-1319502738_1 at /127.0.0.1:51894 [Receiving block BP-1399083668-172.17.0.2-1733957762347:blk_1073741904_1080] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2673ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/, blockId=1073741904, seqno=1479 2024-12-11T22:57:36,623 WARN [DataXceiver for client DFSClient_attempt_1733957780200_0001_m_000000_0_-1319502738_1 at /127.0.0.1:42078 [Receiving block BP-1399083668-172.17.0.2-1733957762347:blk_1073741904_1080] {}] 
datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2673ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/, blockId=1073741904, seqno=1479 2024-12-11T22:57:36,622 WARN [DataXceiver for client DFSClient_attempt_1733957780200_0001_m_000000_0_-1319502738_1 at /127.0.0.1:41436 [Receiving block BP-1399083668-172.17.0.2-1733957762347:blk_1073741904_1080] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2675ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/, blockId=1073741904, seqno=1479 2024-12-11T22:57:38,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T22:57:44,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T22:57:44,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T22:57:44,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T22:57:53,162 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3b4d716d5e3b511feeb6d02f44c1b924, had cached 0 bytes from a total of 5754 2024-12-11T22:57:53,212 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5de470ac35389e049b548004f0da5513, had cached 0 bytes from a total of 15937 2024-12-11T22:58:08,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
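Aside on the block-report entries: each blk_10737419xx block is reported by three datanodes (127.0.0.1:36751, :39843, :41607), i.e. replication factor 3, while the Slow BlockReceiver warnings only record slow disk writes on the shared Jenkins host. A hedged sketch of checking a file's replication and block placement with the plain Hadoop FileSystem API; the inspected path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockReport {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path file = new Path(args[0]);              // e.g. an exported HFile
            FileStatus st = fs.getFileStatus(file);
            System.out.println("replication=" + st.getReplication());
            // One BlockLocation per block, listing the datanodes that hold a replica.
            for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
                System.out.println(loc);
            }
        }
    }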
2024-12-11T22:58:09,211 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 19b26f780d95a771738dd9b93c240d96, had cached 0 bytes from a total of 320414712 2024-12-11T22:58:09,260 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa6198c2afe57e993d9d453d55d14dcf, had cached 0 bytes from a total of 320414712 2024-12-11T22:58:15,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T22:58:15,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T22:58:15,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T22:58:26,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T22:58:26,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T22:58:26,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T22:58:29,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741907_1083 (size=17531) 2024-12-11T22:58:29,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741907_1083 (size=17531) 2024-12-11T22:58:29,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741907_1083 (size=17531) 2024-12-11T22:58:29,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741908_1084 (size=482) 2024-12-11T22:58:29,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741908_1084 (size=482) 2024-12-11T22:58:29,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741908_1084 (size=482) 2024-12-11T22:58:29,225 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000002/launch_container.sh] 2024-12-11T22:58:29,225 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000002/container_tokens] 2024-12-11T22:58:29,226 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000002/sysfs] 2024-12-11T22:58:29,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741909_1085 (size=17531) 2024-12-11T22:58:29,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741909_1085 (size=17531) 2024-12-11T22:58:29,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741909_1085 (size=17531) 2024-12-11T22:58:29,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741910_1086 (size=349750) 2024-12-11T22:58:29,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741910_1086 (size=349750) 2024-12-11T22:58:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741910_1086 (size=349750) 2024-12-11T22:58:29,269 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0001_000001 (auth:SIMPLE) from 127.0.0.1:51790 2024-12-11T22:58:30,840 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T22:58:30,842 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
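Aside on "Finalize the Snapshot Export" / "Verify the exported snapshot's expiration status and integrity": these are the closing steps of an org.apache.hadoop.hbase.snapshot.ExportSnapshot run. A hedged sketch of how such an export is typically driven via ToolRunner; the destination URI is a placeholder, and the --snapshot/--copy-to option spelling reflects the tool's usual usage and should be checked against the installed HBase version:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
        public static void main(String[] args) throws Exception {
            // ExportSnapshot runs as a MapReduce job that copies snapshot metadata and HFiles.
            int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
                new String[] {
                    "--snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
                    "--copy-to", "hdfs://namenode:8020/backup"   // placeholder destination
                });
            System.exit(rc);
        }
    }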
2024-12-11T22:58:30,851 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:30,851 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T22:58:30,852 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T22:58:30,852 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:30,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T22:58:30,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T22:58:30,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:30,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T22:58:30,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957800725/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T22:58:30,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:30,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:30,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T22:58:30,900 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957910899"}]},"ts":"1733957910899"} 2024-12-11T22:58:30,903 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
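Aside on the TestExportSnapshot "List files in DFS ... in root ..." debug output: the test walks both the source and the exported snapshot directories (.snapshotinfo, data.manifest) to compare them. A hedged sketch of the same kind of recursive listing with the standard FileSystem API; the root path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListSnapshotFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path root = new Path(args[0]);   // e.g. .../.hbase-snapshot/<snapshot-name>
            FileSystem fs = FileSystem.get(root.toUri(), conf);
            // Recursively list every file under the snapshot directory.
            RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
            while (it.hasNext()) {
                System.out.println(it.next().getPath());
            }
        }
    }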
2024-12-11T22:58:30,903 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T22:58:30,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-11T22:58:30,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, UNASSIGN}] 2024-12-11T22:58:30,915 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, UNASSIGN 2024-12-11T22:58:30,915 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, UNASSIGN 2024-12-11T22:58:30,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=19b26f780d95a771738dd9b93c240d96, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:58:30,916 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=fa6198c2afe57e993d9d453d55d14dcf, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:58:30,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, UNASSIGN because future has completed 2024-12-11T22:58:30,920 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:30,920 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:58:30,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, UNASSIGN because future has completed 2024-12-11T22:58:30,921 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:30,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, server=b762025f20c5,41635,1733957771023}] 
2024-12-11T22:58:31,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T22:58:31,075 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:58:31,075 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:31,076 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 19b26f780d95a771738dd9b93c240d96, disabling compactions & flushes 2024-12-11T22:58:31,076 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:58:31,077 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:58:31,077 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. after waiting 0 ms 2024-12-11T22:58:31,077 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 2024-12-11T22:58:31,088 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T22:58:31,089 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:31,089 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96. 
2024-12-11T22:58:31,089 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 19b26f780d95a771738dd9b93c240d96: Waiting for close lock at 1733957911076Running coprocessor pre-close hooks at 1733957911076Disabling compacts and flushes for region at 1733957911076Disabling writes for close at 1733957911077 (+1 ms)Writing region close event to WAL at 1733957911079 (+2 ms)Running coprocessor post-close hooks at 1733957911089 (+10 ms)Closed at 1733957911089 2024-12-11T22:58:31,092 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 19b26f780d95a771738dd9b93c240d96 2024-12-11T22:58:31,092 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:58:31,092 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:31,092 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing fa6198c2afe57e993d9d453d55d14dcf, disabling compactions & flushes 2024-12-11T22:58:31,092 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:58:31,092 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:58:31,092 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. after waiting 0 ms 2024-12-11T22:58:31,092 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 
2024-12-11T22:58:31,093 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=19b26f780d95a771738dd9b93c240d96, regionState=CLOSED 2024-12-11T22:58:31,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:58:31,098 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T22:58:31,099 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:31,099 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf. 2024-12-11T22:58:31,100 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for fa6198c2afe57e993d9d453d55d14dcf: Waiting for close lock at 1733957911092Running coprocessor pre-close hooks at 1733957911092Disabling compacts and flushes for region at 1733957911092Disabling writes for close at 1733957911092Writing region close event to WAL at 1733957911093 (+1 ms)Running coprocessor post-close hooks at 1733957911099 (+6 ms)Closed at 1733957911099 2024-12-11T22:58:31,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-11T22:58:31,102 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:58:31,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 19b26f780d95a771738dd9b93c240d96, server=b762025f20c5,41635,1733957771023 in 177 msec 2024-12-11T22:58:31,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=19b26f780d95a771738dd9b93c240d96, UNASSIGN in 188 msec 2024-12-11T22:58:31,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=fa6198c2afe57e993d9d453d55d14dcf, regionState=CLOSED 2024-12-11T22:58:31,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:58:31,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-11T22:58:31,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure fa6198c2afe57e993d9d453d55d14dcf, 
server=b762025f20c5,41635,1733957771023 in 188 msec 2024-12-11T22:58:31,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-11T22:58:31,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fa6198c2afe57e993d9d453d55d14dcf, UNASSIGN in 198 msec 2024-12-11T22:58:31,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-11T22:58:31,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 209 msec 2024-12-11T22:58:31,120 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957911120"}]},"ts":"1733957911120"} 2024-12-11T22:58:31,123 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T22:58:31,123 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T22:58:31,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 242 msec 2024-12-11T22:58:31,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T22:58:31,216 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:58:31,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,225 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,228 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49433, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:31,232 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38173, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T22:58:31,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,240 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:58:31,240 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96 2024-12-11T22:58:31,241 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:58:31,245 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/recovered.edits] 2024-12-11T22:58:31,245 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/recovered.edits] 2024-12-11T22:58:31,246 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/recovered.edits] 2024-12-11T22:58:31,298 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:58:31,298 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_ 2024-12-11T22:58:31,298 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/cf/29c70957a4e4481884ed3c088884cae5_SeqId_4_.433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:58:31,303 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/recovered.edits/6.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48/recovered.edits/6.seqid 2024-12-11T22:58:31,303 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/recovered.edits/10.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96/recovered.edits/10.seqid 2024-12-11T22:58:31,304 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/433c5483abc2344dc88ce6d50b97ed48 2024-12-11T22:58:31,304 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/19b26f780d95a771738dd9b93c240d96 2024-12-11T22:58:31,305 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/recovered.edits/10.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf/recovered.edits/10.seqid 2024-12-11T22:58:31,306 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportFileSystemStateWithSplitRegion/fa6198c2afe57e993d9d453d55d14dcf 2024-12-11T22:58:31,306 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-11T22:58:31,309 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,314 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-11T22:58:31,321 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T22:58:31,327 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T22:58:31,329 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,329 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-11T22:58:31,329 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957911329"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,330 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957911329"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,330 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957911329"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,333 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-11T22:58:31,333 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 433c5483abc2344dc88ce6d50b97ed48, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957785817.433c5483abc2344dc88ce6d50b97ed48.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 19b26f780d95a771738dd9b93c240d96, NAME => 'testExportFileSystemStateWithSplitRegion,,1733957798412.19b26f780d95a771738dd9b93c240d96.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => fa6198c2afe57e993d9d453d55d14dcf, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733957798412.fa6198c2afe57e993d9d453d55d14dcf.', STARTKEY => '5', ENDKEY => ''}] 2024-12-11T22:58:31,334 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
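Aside on the pid=31..37 procedures above: they are the master-side DisableTableProcedure and DeleteTableProcedure triggered by the client's disable and delete requests. A hedged sketch of the corresponding client-side calls with the standard Admin API; the table name is taken from the log, and connection setup is assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTable {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A table must be disabled before it can be deleted; each call blocks until the
                // matching master procedure (DisableTableProcedure / DeleteTableProcedure) finishes.
                if (admin.tableExists(table)) {
                    admin.disableTable(table);
                    admin.deleteTable(table);
                }
            }
        }
    }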
2024-12-11T22:58:31,334 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957911334"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,340 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-11T22:58:31,342 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 122 msec 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-11T22:58:31,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:31,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:31,361 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:31,362 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:31,362 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:58:31,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T22:58:31,368 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957911368"}]},"ts":"1733957911368"} 2024-12-11T22:58:31,372 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-11T22:58:31,372 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T22:58:31,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-11T22:58:31,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, UNASSIGN}] 2024-12-11T22:58:31,379 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, UNASSIGN 2024-12-11T22:58:31,379 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, UNASSIGN 2024-12-11T22:58:31,380 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=3b4d716d5e3b511feeb6d02f44c1b924, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:31,380 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5de470ac35389e049b548004f0da5513, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:31,381 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=b762025f20c5,33867,1733957770938, table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-11T22:58:31,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, UNASSIGN because future has completed 2024-12-11T22:58:31,384 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:31,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:58:31,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, UNASSIGN because future has completed 2024-12-11T22:58:31,387 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:31,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:58:31,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T22:58:31,540 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60739, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:58:31,542 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46139, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:58:31,542 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 5de470ac35389e049b548004f0da5513 2024-12-11T22:58:31,543 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:31,543 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 5de470ac35389e049b548004f0da5513, disabling compactions & flushes 2024-12-11T22:58:31,543 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:58:31,543 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 
2024-12-11T22:58:31,543 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. after waiting 0 ms 2024-12-11T22:58:31,543 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 2024-12-11T22:58:31,544 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:58:31,544 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:31,544 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 3b4d716d5e3b511feeb6d02f44c1b924, disabling compactions & flushes 2024-12-11T22:58:31,545 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:58:31,545 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:58:31,545 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. after waiting 0 ms 2024-12-11T22:58:31,545 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:58:31,559 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:58:31,560 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:31,560 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513. 
2024-12-11T22:58:31,560 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 5de470ac35389e049b548004f0da5513: Waiting for close lock at 1733957911543Running coprocessor pre-close hooks at 1733957911543Disabling compacts and flushes for region at 1733957911543Disabling writes for close at 1733957911543Writing region close event to WAL at 1733957911545 (+2 ms)Running coprocessor post-close hooks at 1733957911560 (+15 ms)Closed at 1733957911560 2024-12-11T22:58:31,561 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:58:31,562 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:31,562 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924. 2024-12-11T22:58:31,562 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 3b4d716d5e3b511feeb6d02f44c1b924: Waiting for close lock at 1733957911544Running coprocessor pre-close hooks at 1733957911544Disabling compacts and flushes for region at 1733957911544Disabling writes for close at 1733957911545 (+1 ms)Writing region close event to WAL at 1733957911546 (+1 ms)Running coprocessor post-close hooks at 1733957911562 (+16 ms)Closed at 1733957911562 2024-12-11T22:58:31,564 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 5de470ac35389e049b548004f0da5513 2024-12-11T22:58:31,565 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=5de470ac35389e049b548004f0da5513, regionState=CLOSED 2024-12-11T22:58:31,566 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:58:31,568 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=3b4d716d5e3b511feeb6d02f44c1b924, regionState=CLOSED 2024-12-11T22:58:31,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:58:31,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:58:31,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-12-11T22:58:31,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 5de470ac35389e049b548004f0da5513, server=b762025f20c5,33867,1733957770938 in 188 msec 2024-12-11T22:58:31,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-11T22:58:31,577 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 3b4d716d5e3b511feeb6d02f44c1b924, server=b762025f20c5,37563,1733957770731 in 191 msec 2024-12-11T22:58:31,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=5de470ac35389e049b548004f0da5513, UNASSIGN in 200 msec 2024-12-11T22:58:31,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-11T22:58:31,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3b4d716d5e3b511feeb6d02f44c1b924, UNASSIGN in 201 msec 2024-12-11T22:58:31,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-11T22:58:31,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 206 msec 2024-12-11T22:58:31,582 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957911582"}]},"ts":"1733957911582"} 2024-12-11T22:58:31,583 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T22:58:31,583 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T22:58:31,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 221 msec 2024-12-11T22:58:31,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T22:58:31,685 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:58:31,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,689 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,689 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,691 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,694 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,696 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:58:31,696 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513 2024-12-11T22:58:31,698 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/recovered.edits] 2024-12-11T22:58:31,698 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/recovered.edits] 2024-12-11T22:58:31,722 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/cf/968f6507c62d41fd9dc4e6223511e3e4 2024-12-11T22:58:31,722 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/cf/5276c58bd549450694c2a80aac16cb09 2024-12-11T22:58:31,728 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513/recovered.edits/9.seqid 2024-12-11T22:58:31,728 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924/recovered.edits/9.seqid 2024-12-11T22:58:31,729 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/5de470ac35389e049b548004f0da5513 2024-12-11T22:58:31,729 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSplitRegion/3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:58:31,729 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-11T22:58:31,730 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-11T22:58:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,733 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,733 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,733 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,733 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T22:58:31,734 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-11T22:58:31,740 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202412115edcaebf4c6341b085a1fa7b8d587ea3_5de470ac35389e049b548004f0da5513 2024-12-11T22:58:31,740 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412119f1e8d968af749d2b1167827f65cae97_3b4d716d5e3b511feeb6d02f44c1b924 2024-12-11T22:58:31,740 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-11T22:58:31,743 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:31,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-11T22:58:31,747 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T22:58:31,750 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T22:58:31,752 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,752 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-11T22:58:31,752 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957911752"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,752 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957911752"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,755 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T22:58:31,755 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3b4d716d5e3b511feeb6d02f44c1b924, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733957782484.3b4d716d5e3b511feeb6d02f44c1b924.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5de470ac35389e049b548004f0da5513, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733957782484.5de470ac35389e049b548004f0da5513.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T22:58:31,756 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
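Aside: the DeleteTableProcedure above, and the snapshot deletions logged just below, are the teardown half of the test. A hedged sketch of the equivalent client-side Admin calls; the snapshot names are the ones the client requests in the following log entries, connection settings use defaults, and error handling is omitted.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // The table must already be disabled (see the DisableTableProcedure above)
      // before deleteTable will succeed.
      admin.deleteTable(table);
      // Snapshot names as requested by the client in the entries that follow.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion");
      admin.deleteSnapshot("snapshot-testExportFileSystemStateWithSplitRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSplitRegion");
    }
  }
}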
2024-12-11T22:58:31,756 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957911756"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:31,759 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-11T22:58:31,760 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 74 msec 2024-12-11T22:58:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-11T22:58:31,856 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T22:58:31,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T22:58:31,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T22:58:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T22:58:31,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:31,927 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=761 (was 711) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 113364) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:46156 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-1144705017_1 at /127.0.0.1:38468 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:36212 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1144705017_1 at /127.0.0.1:46138 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45215 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1405 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45215 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:38502 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=773 (was 759) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=852 (was 1220), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2788 (was 2806) 2024-12-11T22:58:31,927 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=761 is superior to 500 2024-12-11T22:58:31,941 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=761, OpenFileDescriptor=773, MaxFileDescriptor=1048576, SystemLoadAverage=852, ProcessCount=17, AvailableMemoryMB=2787 2024-12-11T22:58:31,941 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=761 is superior to 500 2024-12-11T22:58:31,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:58:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:31,945 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:58:31,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-11T22:58:31,946 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:58:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=45 2024-12-11T22:58:31,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741911_1087 (size=442) 2024-12-11T22:58:31,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741911_1087 (size=442) 2024-12-11T22:58:31,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741911_1087 (size=442) 2024-12-11T22:58:31,958 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ed64762dccf3df815c33a9699a0e1643, NAME => 'testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:31,958 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c3bb275b22e0dd40b997269227cb02a0, NAME => 'testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:31,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741912_1088 (size=67) 2024-12-11T22:58:31,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741913_1089 (size=67) 2024-12-11T22:58:31,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741913_1089 (size=67) 2024-12-11T22:58:31,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741912_1088 (size=67) 2024-12-11T22:58:31,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741912_1088 (size=67) 2024-12-11T22:58:31,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741913_1089 (size=67) 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): 
Instantiated testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing ed64762dccf3df815c33a9699a0e1643, disabling compactions & flushes 2024-12-11T22:58:31,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. after waiting 0 ms 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:31,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing c3bb275b22e0dd40b997269227cb02a0, disabling compactions & flushes 2024-12-11T22:58:31,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for ed64762dccf3df815c33a9699a0e1643: Waiting for close lock at 1733957911969Disabling compacts and flushes for region at 1733957911969Disabling writes for close at 1733957911969Writing region close event to WAL at 1733957911969Closed at 1733957911969 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. after waiting 0 ms 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 
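A note on the create request logged above: the create of 'testtb-testExportWithTargetName' (a single 'cf' family, IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', one split at '1') maps onto the standard HBase 2.x client API roughly as in the following sketch. This is a hedged reconstruction for reference, not the test's actual code; only the table name, family name, split key, and MOB attributes are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)   // IS_MOB => 'true'
                    .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every 'cf' cell is stored as a MOB
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .build())
                .build();
            // One split key at '1' gives the two regions seen in the log: ('', '1') and ('1', '').
            admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
    }
}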
2024-12-11T22:58:31,969 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:31,969 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for c3bb275b22e0dd40b997269227cb02a0: Waiting for close lock at 1733957911969Disabling compacts and flushes for region at 1733957911969Disabling writes for close at 1733957911969Writing region close event to WAL at 1733957911969Closed at 1733957911969 2024-12-11T22:58:31,970 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:58:31,970 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733957911970"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957911970"}]},"ts":"1733957911970"} 2024-12-11T22:58:31,971 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733957911970"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957911970"}]},"ts":"1733957911970"} 2024-12-11T22:58:31,973 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T22:58:31,974 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:58:31,974 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957911974"}]},"ts":"1733957911974"} 2024-12-11T22:58:31,976 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-11T22:58:31,977 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:58:31,978 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:58:31,978 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:58:31,978 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:58:31,978 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of 
racks=1 2024-12-11T22:58:31,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, ASSIGN}] 2024-12-11T22:58:31,980 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, ASSIGN 2024-12-11T22:58:31,980 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, ASSIGN 2024-12-11T22:58:31,980 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T22:58:31,980 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:58:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T22:58:32,131 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
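The repeated "Checking to see if procedure is done pid=45" entries are the client polling the master while the CreateTableProcedure (pid=45) walks its states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS). With the asynchronous Admin API the same wait can be written explicitly; a minimal, hedged sketch assuming a descriptor like the one above and an arbitrary 60-second cap:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateTableWaitSketch {
    // Submits the create and blocks until the master-side procedure completes.
    static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
        admin.createTableAsync(td, new byte[][] { Bytes.toBytes("1") })
             .get(60, TimeUnit.SECONDS); // the blocking createTable() performs this wait internally
    }
}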
2024-12-11T22:58:32,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=ed64762dccf3df815c33a9699a0e1643, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:32,132 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=c3bb275b22e0dd40b997269227cb02a0, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:32,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, ASSIGN because future has completed 2024-12-11T22:58:32,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:58:32,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, ASSIGN because future has completed 2024-12-11T22:58:32,140 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:58:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T22:58:32,295 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:32,295 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => c3bb275b22e0dd40b997269227cb02a0, NAME => 'testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. service=AccessControlService 2024-12-11T22:58:32,296 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,296 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:32,296 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => ed64762dccf3df815c33a9699a0e1643, NAME => 'testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:58:32,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. service=AccessControlService 2024-12-11T22:58:32,297 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
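For context, the enclosing test (snapshot.TestMobSecureExportSnapshot#testExportWithTargetName, named in the ResourceChecker lines above) eventually snapshots this table and exports the snapshot under a different name. A hedged sketch of that flow with the public APIs follows; the snapshot name, destination URI, and target name are placeholders, and the single-dash -snapshot/-copy-to/-target options are assumed from the ExportSnapshot tool's documented usage rather than taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Snapshot the table created above.
            admin.snapshot("snaptb0-testExportWithTargetName",
                TableName.valueOf("testtb-testExportWithTargetName"));
        }
        // Copy the snapshot to another HBase root dir, renaming it on the way out.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",   // placeholder destination
            "-target", "testExportWithTargetName-target"      // name given to the exported snapshot
        });
        System.exit(rc);
    }
}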
2024-12-11T22:58:32,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:32,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,297 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,298 INFO [StoreOpener-c3bb275b22e0dd40b997269227cb02a0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,298 INFO [StoreOpener-ed64762dccf3df815c33a9699a0e1643-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,299 INFO [StoreOpener-c3bb275b22e0dd40b997269227cb02a0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c3bb275b22e0dd40b997269227cb02a0 columnFamilyName cf 2024-12-11T22:58:32,299 INFO [StoreOpener-ed64762dccf3df815c33a9699a0e1643-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed64762dccf3df815c33a9699a0e1643 columnFamilyName cf 2024-12-11T22:58:32,300 DEBUG [StoreOpener-ed64762dccf3df815c33a9699a0e1643-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:32,300 DEBUG [StoreOpener-c3bb275b22e0dd40b997269227cb02a0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:32,301 INFO [StoreOpener-ed64762dccf3df815c33a9699a0e1643-1 {}] regionserver.HStore(327): Store=ed64762dccf3df815c33a9699a0e1643/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:32,301 INFO [StoreOpener-c3bb275b22e0dd40b997269227cb02a0-1 {}] regionserver.HStore(327): Store=c3bb275b22e0dd40b997269227cb02a0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:32,301 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,301 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,302 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,302 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,303 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,305 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 
{event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,305 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,307 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:32,307 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:32,307 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened c3bb275b22e0dd40b997269227cb02a0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74556098, jitterRate=0.11097243428230286}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:32,307 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened ed64762dccf3df815c33a9699a0e1643; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64924151, jitterRate=-0.032554760575294495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:32,307 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,307 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,308 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for c3bb275b22e0dd40b997269227cb02a0: Running coprocessor pre-open hook at 1733957912296Writing region info on filesystem at 1733957912296Initializing all the Stores at 1733957912297 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957912297Cleaning up temporary data from old regions at 1733957912303 (+6 ms)Running coprocessor post-open hooks at 1733957912307 (+4 ms)Region opened successfully at 1733957912308 (+1 ms) 2024-12-11T22:58:32,308 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for ed64762dccf3df815c33a9699a0e1643: Running coprocessor pre-open hook at 1733957912297Writing region info on 
filesystem at 1733957912297Initializing all the Stores at 1733957912298 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957912298Cleaning up temporary data from old regions at 1733957912303 (+5 ms)Running coprocessor post-open hooks at 1733957912307 (+4 ms)Region opened successfully at 1733957912308 (+1 ms) 2024-12-11T22:58:32,309 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0., pid=48, masterSystemTime=1733957912291 2024-12-11T22:58:32,309 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643., pid=49, masterSystemTime=1733957912293 2024-12-11T22:58:32,310 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:32,310 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:32,311 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=c3bb275b22e0dd40b997269227cb02a0, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:32,311 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:32,311 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 
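
The region-open journal above records the column family descriptor the regions were created with ('cf', VERSIONS => '1', IS_MOB => 'true' with MOB_THRESHOLD 0, BLOOMFILTER => 'ROW', 64 KB blocks) and two regions split at row key '1'. A minimal sketch, assuming the standard HBase 2.x client builder API rather than the test's own helper code, of creating an equivalent table (class name and connection setup are hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family mirroring the descriptor logged in the region-open journal:
      // one version, MOB enabled with threshold 0, ROW bloom filter, 64 KB block size.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(cf)
          .build();
      // A single split point at '1' yields the two regions seen above:
      // ['', '1') and ['1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}
```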
2024-12-11T22:58:32,311 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=ed64762dccf3df815c33a9699a0e1643, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:32,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:58:32,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:58:32,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-11T22:58:32,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938 in 177 msec 2024-12-11T22:58:32,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-11T22:58:32,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731 in 176 msec 2024-12-11T22:58:32,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, ASSIGN in 338 msec 2024-12-11T22:58:32,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-12-11T22:58:32,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, ASSIGN in 339 msec 2024-12-11T22:58:32,320 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:58:32,320 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957912320"}]},"ts":"1733957912320"} 2024-12-11T22:58:32,322 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-11T22:58:32,323 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:58:32,323 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-11T22:58:32,326 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 
2024-12-11T22:58:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:32,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:32,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:32,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:32,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:32,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:32,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 401 msec 2024-12-11T22:58:32,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T22:58:32,575 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T22:58:32,576 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,580 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-11T22:58:32,580 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 
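
The entries above show PermissionStorage writing the owner ACL "jenkins: RWXCA" for the new table and ZKPermissionWatcher propagating it to every server under /hbase/acl. In the log this grant is issued internally by the CreateTableProcedure; the following is only a hypothetical sketch, assuming the public AccessControlClient API, of how an equivalent table-level grant could be made by a client (it is not the code path the test used):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (null family/qualifier). Requires the AccessController coprocessor
      // and the hbase:acl table, both visible in the log above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```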
2024-12-11T22:58:32,581 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:32,583 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,591 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,595 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,597 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35924, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,598 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T22:58:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957912601 (current time:1733957912601). 
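
At this point the master receives a snapshot request of type FLUSH named emptySnaptb0-testExportWithTargetName for the table. A minimal sketch, assuming the standard synchronous Admin.snapshot API (the test itself may call a different helper), of the client call that produces such a request:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes;
      // a FLUSH-type snapshot flushes each region before snapshotting it.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}
```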
2024-12-11T22:58:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:58:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T22:58:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:58:32,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4994c3c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:32,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:32,603 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:32,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:32,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:32,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dbba8e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:32,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:32,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,605 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:32,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f37ac01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:32,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:32,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:58:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,608 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T22:58:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e142644, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:32,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:32,610 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:32,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:32,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:32,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b4f0d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:32,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:32,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,611 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:32,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c8c2be2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:32,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:32,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,614 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:58:32,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,618 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T22:58:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,620 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T22:58:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:58:32,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T22:58:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T22:58:32,623 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:58:32,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T22:58:32,624 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:58:32,627 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:58:32,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741914_1090 (size=167) 2024-12-11T22:58:32,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741914_1090 (size=167) 2024-12-11T22:58:32,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741914_1090 (size=167) 2024-12-11T22:58:32,635 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:58:32,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0}] 2024-12-11T22:58:32,636 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,636 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T22:58:32,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-11T22:58:32,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-11T22:58:32,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:32,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for ed64762dccf3df815c33a9699a0e1643: 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for c3bb275b22e0dd40b997269227cb02a0: 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:32,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:58:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741916_1092 (size=70) 2024-12-11T22:58:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741915_1091 (size=70) 2024-12-11T22:58:32,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741916_1092 (size=70) 2024-12-11T22:58:32,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:32,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741915_1091 (size=70) 2024-12-11T22:58:32,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-11T22:58:32,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741916_1092 (size=70) 2024-12-11T22:58:32,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741915_1091 (size=70) 2024-12-11T22:58:32,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 
2024-12-11T22:58:32,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-11T22:58:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-11T22:58:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-11T22:58:32,808 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,808 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:32,808 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:32,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 in 174 msec 2024-12-11T22:58:32,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-12-11T22:58:32,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 in 174 msec 2024-12-11T22:58:32,811 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:58:32,812 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:58:32,813 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:58:32,813 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:58:32,813 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:32,814 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T22:58:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741917_1093 (size=62) 2024-12-11T22:58:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741917_1093 (size=62) 2024-12-11T22:58:32,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741917_1093 (size=62) 2024-12-11T22:58:32,825 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:58:32,825 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:32,826 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741918_1094 (size=649) 2024-12-11T22:58:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741918_1094 (size=649) 2024-12-11T22:58:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741918_1094 (size=649) 2024-12-11T22:58:32,840 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:58:32,851 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:58:32,852 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:32,854 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:58:32,854 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T22:58:32,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 233 msec 2024-12-11T22:58:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T22:58:32,945 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T22:58:32,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:32,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:32,956 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,959 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-11T22:58:32,959 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 
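
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above are emitted when client edits skip the write-ahead log. A minimal sketch, assuming one common way to trigger this (a Put with Durability.SKIP_WAL; the row, qualifier, and value below are hypothetical, not taken from the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log: faster, but the edit is lost if the
      // region server crashes before the memstore is flushed.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```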
2024-12-11T22:58:32,959 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:32,961 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,974 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T22:58:32,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T22:58:32,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957912977 (current time:1733957912977). 2024-12-11T22:58:32,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:58:32,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T22:58:32,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:58:32,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4549980b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:32,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:32,978 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@368c1aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,980 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35592, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:32,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eefc16f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:32,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:32,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,984 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
2024-12-11T22:58:32,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:32,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,984 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:32,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c76cee2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:32,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:32,986 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:32,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:32,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:32,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16edd7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:32,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:32,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:32,988 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35624, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:32,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4209c43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:32,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:32,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:32,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,992 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56946, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:58:32,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:32,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:32,997 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
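The "Start fetching meta region location from registry" / "The fetched meta region location is ..." pairs above, and the hbase:acl lookup keyed by the table name, are ordinary client-side region location calls. A rough equivalent through the public API, assuming an already-open Connection as in the earlier sketch, is:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  // Assumes an already-open Connection (see the earlier connection sketch).
  static void printAclRegionForTable(Connection connection) throws Exception {
    // Mirrors the AsyncNonMetaRegionLocator(310) entry above: the hbase:acl row key
    // for a table's permissions is the table name itself.
    try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("hbase:acl"))) {
      HRegionLocation loc =
          locator.getRegionLocation(Bytes.toBytes("testtb-testExportWithTargetName"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}
```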
2024-12-11T22:58:32,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T22:58:32,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T22:58:32,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T22:58:32,997 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T22:58:32,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]
2024-12-11T22:58:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
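At this point the master has read the table ACL ([jenkins: RWXCA]) and SnapshotManager reports it will attempt the snapshot; the entries that follow show SnapshotProcedure pid=53 running with type=FLUSH. The client call that triggers this flow is a plain Admin.snapshot request; a minimal sketch, assuming an open Connection, is:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  // Names mirror the ones in the log; assumes an already-open Connection.
  static void takeSnapshot(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      // A FLUSH-type snapshot: regions are flushed first (the HRegion "Flushing ..." entries
      // below), then per-region SnapshotRegionProcedures add references to the new HFiles.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}
```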
2024-12-11T22:58:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T22:58:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T22:58:33,000 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:58:33,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T22:58:33,001 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:58:33,004 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:58:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741919_1095 (size=162) 2024-12-11T22:58:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741919_1095 (size=162) 2024-12-11T22:58:33,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741919_1095 (size=162) 2024-12-11T22:58:33,013 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:58:33,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0}] 2024-12-11T22:58:33,014 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:33,014 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:33,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T22:58:33,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-11T22:58:33,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-11T22:58:33,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:33,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:33,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing ed64762dccf3df815c33a9699a0e1643 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T22:58:33,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing c3bb275b22e0dd40b997269227cb02a0 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T22:58:33,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 is 71, key is 01b9a27bcd733382a8ba33cc3a596411/cf:q/1733957912952/Put/seqid=0 2024-12-11T22:58:33,186 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 is 71, key is 13e4cc71846c5316323d43837d1595fa/cf:q/1733957912954/Put/seqid=0 2024-12-11T22:58:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741920_1096 (size=5171) 2024-12-11T22:58:33,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741920_1096 (size=5171) 2024-12-11T22:58:33,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:33,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741920_1096 (size=5171) 2024-12-11T22:58:33,199 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:33,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/.tmp/cf/80c84e7edddb46b98ec2ba4896141fea, store: [table=testtb-testExportWithTargetName family=cf region=ed64762dccf3df815c33a9699a0e1643] 2024-12-11T22:58:33,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741921_1097 (size=8101) 2024-12-11T22:58:33,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/.tmp/cf/80c84e7edddb46b98ec2ba4896141fea is 208, key is 0710c9a147db1b73b176cd60e740e81ad/cf:q/1733957912952/Put/seqid=0 2024-12-11T22:58:33,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741921_1097 (size=8101) 2024-12-11T22:58:33,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741921_1097 (size=8101) 2024-12-11T22:58:33,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:33,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741922_1098 (size=6116) 2024-12-11T22:58:33,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:33,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741922_1098 (size=6116) 2024-12-11T22:58:33,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741922_1098 (size=6116) 2024-12-11T22:58:33,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/.tmp/cf/80c84e7edddb46b98ec2ba4896141fea 2024-12-11T22:58:33,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/.tmp/cf/e47e14b62c3945028a37e682c85f32a5, store: [table=testtb-testExportWithTargetName family=cf region=c3bb275b22e0dd40b997269227cb02a0] 2024-12-11T22:58:33,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/.tmp/cf/e47e14b62c3945028a37e682c85f32a5 is 208, key is 1e1f2799e56f0fa8986f60cb89a68544a/cf:q/1733957912954/Put/seqid=0 2024-12-11T22:58:33,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741923_1099 (size=14745) 2024-12-11T22:58:33,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741923_1099 (size=14745) 2024-12-11T22:58:33,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741923_1099 (size=14745) 2024-12-11T22:58:33,216 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/.tmp/cf/e47e14b62c3945028a37e682c85f32a5 2024-12-11T22:58:33,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/.tmp/cf/80c84e7edddb46b98ec2ba4896141fea as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea 2024-12-11T22:58:33,225 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/.tmp/cf/e47e14b62c3945028a37e682c85f32a5 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5 2024-12-11T22:58:33,225 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea, entries=4, sequenceid=6, filesize=6.0 K 2024-12-11T22:58:33,226 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for ed64762dccf3df815c33a9699a0e1643 in 60ms, sequenceid=6, compaction requested=false 2024-12-11T22:58:33,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-11T22:58:33,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for ed64762dccf3df815c33a9699a0e1643: 2024-12-11T22:58:33,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. for snaptb0-testExportWithTargetName completed. 2024-12-11T22:58:33,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T22:58:33,227 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:33,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea] hfiles 2024-12-11T22:58:33,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea for snapshot=snaptb0-testExportWithTargetName 2024-12-11T22:58:33,231 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5, entries=46, sequenceid=6, filesize=14.4 K 2024-12-11T22:58:33,232 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c3bb275b22e0dd40b997269227cb02a0 in 66ms, sequenceid=6, compaction requested=false 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for c3bb275b22e0dd40b997269227cb02a0: 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. for snaptb0-testExportWithTargetName completed. 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5] hfiles 2024-12-11T22:58:33,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5 for snapshot=snaptb0-testExportWithTargetName 2024-12-11T22:58:33,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741924_1100 (size=109) 2024-12-11T22:58:33,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741924_1100 (size=109) 2024-12-11T22:58:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741924_1100 (size=109) 2024-12-11T22:58:33,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 
2024-12-11T22:58:33,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-11T22:58:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-11T22:58:33,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:33,246 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:33,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ed64762dccf3df815c33a9699a0e1643 in 234 msec 2024-12-11T22:58:33,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741925_1101 (size=109) 2024-12-11T22:58:33,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741925_1101 (size=109) 2024-12-11T22:58:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741925_1101 (size=109) 2024-12-11T22:58:33,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 
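The recurring "Checking to see if procedure is done pid=53" entries are the blocking snapshot client polling the master until the procedure finishes. Once it does, the snapshot becomes visible through the admin API; a small sketch of checking for it by name, again assuming an open Connection, is:

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  // Assumes an already-open Connection.
  static boolean snapshotExists(Connection connection, String name) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      // Admin.snapshot(...) returns only after the master reports the SnapshotProcedure done;
      // afterwards the completed snapshot shows up in the listing.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
    }
  }
}
```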
2024-12-11T22:58:33,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-11T22:58:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-11T22:58:33,255 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:33,255 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:33,258 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-11T22:58:33,259 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c3bb275b22e0dd40b997269227cb02a0 in 244 msec 2024-12-11T22:58:33,259 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:58:33,260 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:58:33,261 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
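The remaining entries consolidate and verify the snapshot manifest and then export it to a separate HDFS directory (the ExportSnapshot and TableMapReduceUtil entries further down, which stage dependency jars for the MapReduce copy job). Outside this test harness the same export is normally driven through the ExportSnapshot tool; a rough ToolRunner sketch is below. The flag names (-snapshot, -copy-to, -target, -overwrite) are given from memory and should be checked against the ExportSnapshot usage text for your HBase version, and the destination path is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Destination is a placeholder; the test exports under .../export-test/export-<timestamp>.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs:///tmp/export-test",
        "-target", "testExportWithTargetName",  // rename at the destination, as this test exercises
        "-overwrite"
    });
    System.exit(rc);
  }
}
```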
2024-12-11T22:58:33,261 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:58:33,261 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:33,263 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643] hfiles 2024-12-11T22:58:33,263 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:33,263 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741926_1102 (size=293) 2024-12-11T22:58:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741926_1102 (size=293) 2024-12-11T22:58:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741926_1102 (size=293) 2024-12-11T22:58:33,283 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:58:33,283 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-11T22:58:33,284 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-11T22:58:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T22:58:33,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741927_1103 (size=959) 2024-12-11T22:58:33,318 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741927_1103 (size=959) 2024-12-11T22:58:33,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741927_1103 (size=959) 2024-12-11T22:58:33,329 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:58:33,337 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:58:33,338 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T22:58:33,340 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:58:33,340 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T22:58:33,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 342 msec 2024-12-11T22:58:33,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T22:58:33,625 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T22:58:33,625 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625 2024-12-11T22:58:33,626 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:33,664 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, 
inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:33,664 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T22:58:33,667 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T22:58:33,672 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T22:58:33,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741928_1104 (size=162) 2024-12-11T22:58:33,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741928_1104 (size=162) 2024-12-11T22:58:33,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741928_1104 (size=162) 2024-12-11T22:58:33,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741929_1105 (size=959) 2024-12-11T22:58:33,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741929_1105 (size=959) 2024-12-11T22:58:33,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741929_1105 (size=959) 2024-12-11T22:58:33,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741930_1106 (size=154) 2024-12-11T22:58:33,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741930_1106 (size=154) 2024-12-11T22:58:33,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741930_1106 (size=154) 2024-12-11T22:58:33,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:33,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:33,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-5044064685335421669.jar 2024-12-11T22:58:34,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-6562931569388671889.jar 2024-12-11T22:58:34,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T22:58:34,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T22:58:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T22:58:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T22:58:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T22:58:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T22:58:34,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T22:58:34,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T22:58:34,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T22:58:34,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T22:58:34,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:58:34,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:58:34,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741931_1107 (size=131440) 2024-12-11T22:58:34,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741931_1107 (size=131440) 2024-12-11T22:58:34,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741931_1107 (size=131440) 2024-12-11T22:58:34,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T22:58:34,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T22:58:34,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T22:58:34,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T22:58:34,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T22:58:34,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T22:58:34,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741934_1110 (size=903541) 2024-12-11T22:58:34,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741934_1110 (size=903541) 2024-12-11T22:58:34,812 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741934_1110 (size=903541) 2024-12-11T22:58:34,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T22:58:34,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T22:58:34,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T22:58:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T22:58:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T22:58:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T22:58:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741937_1113 (size=77835) 2024-12-11T22:58:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741937_1113 (size=77835) 2024-12-11T22:58:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741937_1113 (size=77835) 2024-12-11T22:58:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741938_1114 (size=30949) 2024-12-11T22:58:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741938_1114 (size=30949) 2024-12-11T22:58:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741938_1114 (size=30949) 2024-12-11T22:58:34,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T22:58:34,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T22:58:34,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T22:58:34,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T22:58:34,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T22:58:34,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T22:58:34,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741941_1117 (size=232957) 2024-12-11T22:58:34,909 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741941_1117 (size=232957) 2024-12-11T22:58:34,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741941_1117 (size=232957) 2024-12-11T22:58:34,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741942_1118 (size=127628) 2024-12-11T22:58:34,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741942_1118 (size=127628) 2024-12-11T22:58:34,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741942_1118 (size=127628) 2024-12-11T22:58:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741943_1119 (size=20406) 2024-12-11T22:58:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741943_1119 (size=20406) 2024-12-11T22:58:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741943_1119 (size=20406) 2024-12-11T22:58:34,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T22:58:34,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T22:58:34,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T22:58:34,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741945_1121 (size=217634) 2024-12-11T22:58:34,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741945_1121 (size=217634) 2024-12-11T22:58:34,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741945_1121 (size=217634) 2024-12-11T22:58:34,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T22:58:34,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T22:58:34,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T22:58:34,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741947_1123 (size=322274) 2024-12-11T22:58:34,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741947_1123 (size=322274) 2024-12-11T22:58:34,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741947_1123 (size=322274) 2024-12-11T22:58:34,989 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741948_1124 (size=503880) 2024-12-11T22:58:34,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741948_1124 (size=503880) 2024-12-11T22:58:34,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741948_1124 (size=503880) 2024-12-11T22:58:35,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741949_1125 (size=443172) 2024-12-11T22:58:35,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741949_1125 (size=443172) 2024-12-11T22:58:35,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741949_1125 (size=443172) 2024-12-11T22:58:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741950_1126 (size=29229) 2024-12-11T22:58:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741950_1126 (size=29229) 2024-12-11T22:58:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741950_1126 (size=29229) 2024-12-11T22:58:35,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741951_1127 (size=24096) 2024-12-11T22:58:35,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741951_1127 (size=24096) 2024-12-11T22:58:35,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741951_1127 (size=24096) 2024-12-11T22:58:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741952_1128 (size=6426222) 2024-12-11T22:58:35,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741952_1128 (size=6426222) 2024-12-11T22:58:35,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741952_1128 (size=6426222) 2024-12-11T22:58:35,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741953_1129 (size=111872) 2024-12-11T22:58:35,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741953_1129 (size=111872) 2024-12-11T22:58:35,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741953_1129 (size=111872) 2024-12-11T22:58:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741954_1130 (size=45609) 2024-12-11T22:58:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741954_1130 (size=45609) 2024-12-11T22:58:35,055 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741954_1130 (size=45609) 2024-12-11T22:58:35,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741955_1131 (size=136454) 2024-12-11T22:58:35,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741955_1131 (size=136454) 2024-12-11T22:58:35,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741955_1131 (size=136454) 2024-12-11T22:58:35,067 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T22:58:35,070 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-11T22:58:35,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-11T22:58:35,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-11T22:58:35,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-11T22:58:35,072 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T22:58:35,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741956_1132 (size=1031) 2024-12-11T22:58:35,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741956_1132 (size=1031) 2024-12-11T22:58:35,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741956_1132 (size=1031) 2024-12-11T22:58:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741957_1133 (size=35) 2024-12-11T22:58:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741957_1133 (size=35) 2024-12-11T22:58:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741957_1133 (size=35) 2024-12-11T22:58:35,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741958_1134 (size=303999) 2024-12-11T22:58:35,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741958_1134 (size=303999) 2024-12-11T22:58:35,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741958_1134 (size=303999) 2024-12-11T22:58:35,346 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:58:35,346 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T22:58:35,349 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0001_000001 (auth:SIMPLE) from 127.0.0.1:57632 2024-12-11T22:58:35,356 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000001/launch_container.sh] 2024-12-11T22:58:35,357 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000001/container_tokens] 2024-12-11T22:58:35,357 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0001/container_1733957780200_0001_01_000001/sysfs] 2024-12-11T22:58:36,213 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:51800 2024-12-11T22:58:36,619 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:58:38,684 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T22:58:40,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T22:58:40,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-11T22:58:40,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:40,121 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T22:58:42,196 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:48402 2024-12-11T22:58:42,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741959_1135 (size=349697) 2024-12-11T22:58:42,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741959_1135 (size=349697) 2024-12-11T22:58:42,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741959_1135 (size=349697) 2024-12-11T22:58:44,499 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:46200 2024-12-11T22:58:44,499 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:59020 2024-12-11T22:58:45,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:59030 2024-12-11T22:58:45,376 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:46204 2024-12-11T22:58:45,623 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:58:47,349 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T22:58:50,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741960_1136 (size=8101) 2024-12-11T22:58:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741960_1136 (size=8101) 2024-12-11T22:58:50,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741960_1136 (size=8101) 2024-12-11T22:58:53,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741962_1138 (size=5171) 2024-12-11T22:58:53,253 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741962_1138 (size=5171) 2024-12-11T22:58:53,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741962_1138 (size=5171) 2024-12-11T22:58:53,399 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000005/launch_container.sh] 2024-12-11T22:58:53,399 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000005/container_tokens] 2024-12-11T22:58:53,399 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000005/sysfs] 2024-12-11T22:58:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741963_1139 (size=6116) 2024-12-11T22:58:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741963_1139 (size=6116) 2024-12-11T22:58:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741963_1139 (size=6116) 2024-12-11T22:58:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741964_1140 (size=14745) 2024-12-11T22:58:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741964_1140 (size=14745) 2024-12-11T22:58:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741964_1140 (size=14745) 2024-12-11T22:58:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741961_1137 (size=31744) 2024-12-11T22:58:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741961_1137 (size=31744) 2024-12-11T22:58:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741961_1137 (size=31744) 2024-12-11T22:58:53,553 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000004/launch_container.sh] 2024-12-11T22:58:53,554 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000004/container_tokens] 2024-12-11T22:58:53,554 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000004/sysfs] 2024-12-11T22:58:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741965_1141 (size=465) 2024-12-11T22:58:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741965_1141 (size=465) 2024-12-11T22:58:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741965_1141 (size=465) 2024-12-11T22:58:53,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741966_1142 (size=31744) 2024-12-11T22:58:53,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741966_1142 (size=31744) 2024-12-11T22:58:53,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741966_1142 (size=31744) 2024-12-11T22:58:53,608 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000002/launch_container.sh] 2024-12-11T22:58:53,608 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000002/container_tokens] 2024-12-11T22:58:53,608 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000002/sysfs] 2024-12-11T22:58:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39843 is added to blk_1073741967_1143 (size=349697) 2024-12-11T22:58:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741967_1143 (size=349697) 2024-12-11T22:58:53,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741967_1143 (size=349697) 2024-12-11T22:58:53,634 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:55606 2024-12-11T22:58:53,641 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:32778 2024-12-11T22:58:53,650 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:32782 2024-12-11T22:58:55,299 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T22:58:55,301 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T22:58:55,310 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-11T22:58:55,310 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T22:58:55,311 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T22:58:55,312 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T22:58:55,312 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-11T22:58:55,312 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-11T22:58:55,312 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/testExportWithTargetName 2024-12-11T22:58:55,313 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-11T22:58:55,313 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957913625/.hbase-snapshot/testExportWithTargetName/data.manifest 
2024-12-11T22:58:55,320 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-11T22:58:55,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T22:58:55,324 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957935324"}]},"ts":"1733957935324"} 2024-12-11T22:58:55,327 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-11T22:58:55,327 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-11T22:58:55,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-11T22:58:55,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, UNASSIGN}] 2024-12-11T22:58:55,332 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, UNASSIGN 2024-12-11T22:58:55,332 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, UNASSIGN 2024-12-11T22:58:55,333 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=c3bb275b22e0dd40b997269227cb02a0, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:55,334 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=ed64762dccf3df815c33a9699a0e1643, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:55,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, UNASSIGN because future has completed 2024-12-11T22:58:55,336 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:55,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:58:55,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, UNASSIGN because future has completed 2024-12-11T22:58:55,337 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:58:55,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:58:55,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T22:58:55,490 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:55,490 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing c3bb275b22e0dd40b997269227cb02a0, disabling compactions & flushes 2024-12-11T22:58:55,491 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:55,491 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. after waiting 0 ms 2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 
2024-12-11T22:58:55,491 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing ed64762dccf3df815c33a9699a0e1643, disabling compactions & flushes 2024-12-11T22:58:55,491 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:55,492 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:55,492 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. after waiting 0 ms 2024-12-11T22:58:55,492 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:55,500 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:58:55,500 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:58:55,501 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:55,501 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:58:55,501 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643. 2024-12-11T22:58:55,501 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0. 
2024-12-11T22:58:55,501 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for ed64762dccf3df815c33a9699a0e1643: Waiting for close lock at 1733957935491Running coprocessor pre-close hooks at 1733957935491Disabling compacts and flushes for region at 1733957935491Disabling writes for close at 1733957935492 (+1 ms)Writing region close event to WAL at 1733957935494 (+2 ms)Running coprocessor post-close hooks at 1733957935501 (+7 ms)Closed at 1733957935501 2024-12-11T22:58:55,501 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for c3bb275b22e0dd40b997269227cb02a0: Waiting for close lock at 1733957935490Running coprocessor pre-close hooks at 1733957935490Disabling compacts and flushes for region at 1733957935490Disabling writes for close at 1733957935491 (+1 ms)Writing region close event to WAL at 1733957935493 (+2 ms)Running coprocessor post-close hooks at 1733957935501 (+8 ms)Closed at 1733957935501 2024-12-11T22:58:55,503 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:55,504 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=ed64762dccf3df815c33a9699a0e1643, regionState=CLOSED 2024-12-11T22:58:55,504 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:55,504 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=c3bb275b22e0dd40b997269227cb02a0, regionState=CLOSED 2024-12-11T22:58:55,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:58:55,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:58:55,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-11T22:58:55,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure ed64762dccf3df815c33a9699a0e1643, server=b762025f20c5,37563,1733957770731 in 169 msec 2024-12-11T22:58:55,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-11T22:58:55,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure c3bb275b22e0dd40b997269227cb02a0, server=b762025f20c5,33867,1733957770938 in 171 msec 2024-12-11T22:58:55,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=ed64762dccf3df815c33a9699a0e1643, UNASSIGN in 179 msec 2024-12-11T22:58:55,510 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=59, resume processing ppid=57 2024-12-11T22:58:55,510 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c3bb275b22e0dd40b997269227cb02a0, UNASSIGN in 180 msec 2024-12-11T22:58:55,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-11T22:58:55,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 184 msec 2024-12-11T22:58:55,513 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957935512"}]},"ts":"1733957935512"} 2024-12-11T22:58:55,514 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-11T22:58:55,514 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-11T22:58:55,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 195 msec 2024-12-11T22:58:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T22:58:55,647 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T22:58:55,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-11T22:58:55,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,653 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-11T22:58:55,655 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,658 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-11T22:58:55,660 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:55,660 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:55,662 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/recovered.edits] 2024-12-11T22:58:55,662 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/recovered.edits] 2024-12-11T22:58:55,667 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/cf/e47e14b62c3945028a37e682c85f32a5 2024-12-11T22:58:55,667 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/cf/80c84e7edddb46b98ec2ba4896141fea 2024-12-11T22:58:55,670 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0/recovered.edits/9.seqid 2024-12-11T22:58:55,670 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643/recovered.edits/9.seqid 2024-12-11T22:58:55,671 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:55,671 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithTargetName/ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:55,671 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-11T22:58:55,671 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-11T22:58:55,672 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-11T22:58:55,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,677 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241211fa680e0417c247a5bc77ddfc7736ce0c_ed64762dccf3df815c33a9699a0e1643 2024-12-11T22:58:55,677 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211d445c91a1580478ebbc6b420ac58636e_c3bb275b22e0dd40b997269227cb02a0 2024-12-11T22:58:55,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T22:58:55,678 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T22:58:55,678 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-11T22:58:55,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T22:58:55,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T22:58:55,683 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:55,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:55,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-11T22:58:55,687 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-11T22:58:55,691 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 
'testtb-testExportWithTargetName' descriptor. 2024-12-11T22:58:55,692 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,693 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-11T22:58:55,693 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957935693"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:55,693 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957935693"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:55,696 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T22:58:55,696 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ed64762dccf3df815c33a9699a0e1643, NAME => 'testtb-testExportWithTargetName,,1733957911942.ed64762dccf3df815c33a9699a0e1643.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c3bb275b22e0dd40b997269227cb02a0, NAME => 'testtb-testExportWithTargetName,1,1733957911942.c3bb275b22e0dd40b997269227cb02a0.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T22:58:55,696 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-11T22:58:55,696 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957935696"}]},"ts":"9223372036854775807"} 2024-12-11T22:58:55,699 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-11T22:58:55,700 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T22:58:55,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 51 msec 2024-12-11T22:58:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-11T22:58:55,795 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-11T22:58:55,795 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T22:58:55,802 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T22:58:55,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-11T22:58:55,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T22:58:55,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-11T22:58:55,828 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=787 (was 761) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2197 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_830825791_1 at /127.0.0.1:58452 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:58474 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46339 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 116635) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:39063 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_830825791_1 at /127.0.0.1:48150 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending 
Thread for localhost/127.0.0.1:33209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:46339 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:48170 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39063 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:38070 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=789 (was 773) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=851 (was 852), ProcessCount=17 (was 17), AvailableMemoryMB=4100 (was 2787) - AvailableMemoryMB LEAK? - 2024-12-11T22:58:55,828 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-11T22:58:55,846 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=787, OpenFileDescriptor=789, MaxFileDescriptor=1048576, SystemLoadAverage=851, ProcessCount=17, AvailableMemoryMB=4099 2024-12-11T22:58:55,846 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-11T22:58:55,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:58:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:58:55,850 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:58:55,851 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: 
"testtb-testExportWithResetTtl" procId is: 63 2024-12-11T22:58:55,851 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:58:55,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T22:58:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741968_1144 (size=440) 2024-12-11T22:58:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741968_1144 (size=440) 2024-12-11T22:58:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741968_1144 (size=440) 2024-12-11T22:58:55,861 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bebe8ade099f68470b70428d8bb439d5, NAME => 'testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:55,862 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7864551aa5bf1a578798812c8017df26, NAME => 'testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:55,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741970_1146 (size=65) 2024-12-11T22:58:55,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741970_1146 (size=65) 2024-12-11T22:58:55,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741969_1145 (size=65) 2024-12-11T22:58:55,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741970_1146 (size=65) 2024-12-11T22:58:55,872 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741969_1145 (size=65) 2024-12-11T22:58:55,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741969_1145 (size=65) 2024-12-11T22:58:55,872 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 7864551aa5bf1a578798812c8017df26, disabling compactions & flushes 2024-12-11T22:58:55,873 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. after waiting 0 ms 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:55,873 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7864551aa5bf1a578798812c8017df26: Waiting for close lock at 1733957935873Disabling compacts and flushes for region at 1733957935873Disabling writes for close at 1733957935873Writing region close event to WAL at 1733957935873Closed at 1733957935873 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing bebe8ade099f68470b70428d8bb439d5, disabling compactions & flushes 2024-12-11T22:58:55,873 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 
after waiting 0 ms 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:55,873 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:55,873 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for bebe8ade099f68470b70428d8bb439d5: Waiting for close lock at 1733957935873Disabling compacts and flushes for region at 1733957935873Disabling writes for close at 1733957935873Writing region close event to WAL at 1733957935873Closed at 1733957935873 2024-12-11T22:58:55,874 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:58:55,875 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733957935874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957935874"}]},"ts":"1733957935874"} 2024-12-11T22:58:55,875 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733957935874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957935874"}]},"ts":"1733957935874"} 2024-12-11T22:58:55,877 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
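The entries above trace CreateTableProcedure pid=63 for 'testtb-testExportWithResetTtl': pre-operation, writing the FS layout, initializing two regions with a MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'), and the ADD_TO_META step that puts both region rows. The sketch below, assuming a standard HBase 2.x Connection (the class name and connection setup are illustrative, not taken from the test source), shows the kind of Admin.createTable call that starts such a procedure; the MOB-related family settings mirror the descriptor printed in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // Family 'cf' with MOB enabled and threshold 0, matching the descriptor logged by HMaster.
      admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)     // IS_MOB => 'true'
              .setMobThreshold(0L)     // MOB_THRESHOLD => '0'
              .setMaxVersions(1)       // VERSIONS => '1'
              .build())
          .build());
      // createTable blocks until the CreateTableProcedure (pid=63 in the log) completes.
    }
  }
}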
2024-12-11T22:58:55,878 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:58:55,878 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957935878"}]},"ts":"1733957935878"} 2024-12-11T22:58:55,880 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T22:58:55,881 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:58:55,882 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:58:55,883 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:58:55,883 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:58:55,883 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:58:55,883 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:58:55,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, ASSIGN}] 2024-12-11T22:58:55,885 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, ASSIGN 2024-12-11T22:58:55,885 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, ASSIGN 2024-12-11T22:58:55,886 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:58:55,886 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T22:58:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T22:58:56,037 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T22:58:56,037 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=bebe8ade099f68470b70428d8bb439d5, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:56,037 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=7864551aa5bf1a578798812c8017df26, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:58:56,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, ASSIGN because future has completed 2024-12-11T22:58:56,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:58:56,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, ASSIGN because future has completed 2024-12-11T22:58:56,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:58:56,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T22:58:56,198 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:56,198 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => bebe8ade099f68470b70428d8bb439d5, NAME => 'testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:58:56,199 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. service=AccessControlService 2024-12-11T22:58:56,199 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:58:56,199 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,199 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:56,199 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,199 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,200 INFO [StoreOpener-bebe8ade099f68470b70428d8bb439d5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,201 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:56,202 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 7864551aa5bf1a578798812c8017df26, NAME => 'testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:58:56,202 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. service=AccessControlService 2024-12-11T22:58:56,202 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T22:58:56,202 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,202 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:56,202 INFO [StoreOpener-bebe8ade099f68470b70428d8bb439d5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bebe8ade099f68470b70428d8bb439d5 columnFamilyName cf 2024-12-11T22:58:56,203 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,203 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,204 DEBUG [StoreOpener-bebe8ade099f68470b70428d8bb439d5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:56,204 INFO [StoreOpener-7864551aa5bf1a578798812c8017df26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,204 INFO [StoreOpener-bebe8ade099f68470b70428d8bb439d5-1 {}] regionserver.HStore(327): Store=bebe8ade099f68470b70428d8bb439d5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:56,205 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,205 INFO [StoreOpener-7864551aa5bf1a578798812c8017df26-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7864551aa5bf1a578798812c8017df26 columnFamilyName cf 2024-12-11T22:58:56,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,206 DEBUG [StoreOpener-7864551aa5bf1a578798812c8017df26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:56,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,206 INFO [StoreOpener-7864551aa5bf1a578798812c8017df26-1 {}] regionserver.HStore(327): Store=7864551aa5bf1a578798812c8017df26/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:56,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,206 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,207 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,207 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,207 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,208 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,208 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,209 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, 
pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:56,210 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened bebe8ade099f68470b70428d8bb439d5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60600606, jitterRate=-0.09698060154914856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:56,210 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,211 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for bebe8ade099f68470b70428d8bb439d5: Running coprocessor pre-open hook at 1733957936199Writing region info on filesystem at 1733957936199Initializing all the Stores at 1733957936200 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957936200Cleaning up temporary data from old regions at 1733957936206 (+6 ms)Running coprocessor post-open hooks at 1733957936210 (+4 ms)Region opened successfully at 1733957936211 (+1 ms) 2024-12-11T22:58:56,212 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5., pid=66, masterSystemTime=1733957936195 2024-12-11T22:58:56,212 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:56,212 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 7864551aa5bf1a578798812c8017df26; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64943419, jitterRate=-0.03226764500141144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:56,212 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,213 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 7864551aa5bf1a578798812c8017df26: Running coprocessor pre-open hook at 1733957936203Writing region info on filesystem at 1733957936203Initializing all the Stores at 1733957936203Instantiating store for column family {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957936203Cleaning up temporary data from old regions at 1733957936208 (+5 ms)Running coprocessor post-open hooks at 1733957936212 (+4 ms)Region opened successfully at 1733957936213 (+1 ms) 2024-12-11T22:58:56,213 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26., pid=67, masterSystemTime=1733957936197 2024-12-11T22:58:56,213 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:56,214 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:56,214 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=bebe8ade099f68470b70428d8bb439d5, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:56,214 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:56,215 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 
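At this point both OpenRegionProcedures (pid=66, pid=67) have opened their regions and the master is about to mark them OPEN in hbase:meta; a little further down the test confirms the result ("Found 2 regions for table testtb-testExportWithResetTtl"). Below is a hedged sketch of how a client can perform a similar check through the standard RegionLocator API; the helper method and its name are illustrative only.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class RegionCheckSketch {
  /** Lists the assigned regions of a table, similar to the "Found 2 regions" check in the log. */
  static void printRegionLocations(Connection conn, String tableName) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(tableName))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for table " + tableName);
      for (HRegionLocation loc : locations) {
        // Encoded region name plus the region server it was opened on (e.g. b762025f20c5,37563,...).
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}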
2024-12-11T22:58:56,215 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=7864551aa5bf1a578798812c8017df26, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:58:56,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:58:56,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:58:56,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-11T22:58:56,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731 in 176 msec 2024-12-11T22:58:56,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-11T22:58:56,220 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, ASSIGN in 336 msec 2024-12-11T22:58:56,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023 in 175 msec 2024-12-11T22:58:56,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-12-11T22:58:56,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, ASSIGN in 337 msec 2024-12-11T22:58:56,223 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:58:56,223 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957936223"}]},"ts":"1733957936223"} 2024-12-11T22:58:56,224 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T22:58:56,225 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:58:56,226 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-11T22:58:56,229 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T22:58:56,264 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:56,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:56,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:56,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:56,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:56,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:56,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:56,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:56,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 425 msec 2024-12-11T22:58:56,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000003/launch_container.sh] 2024-12-11T22:58:56,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000003/container_tokens] 2024-12-11T22:58:56,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000003/sysfs] 2024-12-11T22:58:56,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T22:58:56,477 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T22:58:56,477 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,483 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-11T22:58:56,483 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:56,483 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:56,486 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,496 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,501 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T22:58:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957936504 (current time:1733957936504). 
2024-12-11T22:58:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:58:56,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T22:58:56,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:58:56,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f0b93a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:56,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:56,507 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:56,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:56,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:56,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@549024f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:56,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:56,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,509 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:56,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4622e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:56,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:56,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,513 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44312, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:56,514 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T22:58:56,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:56,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,515 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T22:58:56,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d0ae3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:56,520 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:56,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:56,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:56,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42638995, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:56,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:56,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,522 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:56,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42b60078, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:56,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:56,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T22:58:56,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:58:56,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:56,532 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T22:58:56,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:56,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,532 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:56,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T22:58:56,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:58:56,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T22:58:56,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T22:58:56,535 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:58:56,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T22:58:56,536 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:58:56,538 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:58:56,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741971_1147 (size=161) 2024-12-11T22:58:56,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741971_1147 (size=161) 2024-12-11T22:58:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741971_1147 (size=161) 2024-12-11T22:58:56,576 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:58:56,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26}] 2024-12-11T22:58:56,577 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,577 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T22:58:56,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-11T22:58:56,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-11T22:58:56,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 7864551aa5bf1a578798812c8017df26: 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for bebe8ade099f68470b70428d8bb439d5: 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:56,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:58:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741972_1148 (size=68) 2024-12-11T22:58:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741973_1149 (size=68) 2024-12-11T22:58:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741973_1149 (size=68) 2024-12-11T22:58:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741972_1148 (size=68) 2024-12-11T22:58:56,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741973_1149 (size=68) 2024-12-11T22:58:56,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:56,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 
2024-12-11T22:58:56,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741972_1148 (size=68) 2024-12-11T22:58:56,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-11T22:58:56,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-11T22:58:56,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-11T22:58:56,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-11T22:58:56,738 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:56,738 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,738 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 in 162 msec 2024-12-11T22:58:56,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-11T22:58:56,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 in 162 msec 2024-12-11T22:58:56,740 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:58:56,741 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:58:56,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:58:56,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:58:56,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:56,742 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T22:58:56,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741974_1150 (size=60) 2024-12-11T22:58:56,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741974_1150 (size=60) 2024-12-11T22:58:56,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741974_1150 (size=60) 2024-12-11T22:58:56,749 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:58:56,749 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-11T22:58:56,750 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-11T22:58:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741975_1151 (size=641) 2024-12-11T22:58:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741975_1151 (size=641) 2024-12-11T22:58:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741975_1151 (size=641) 2024-12-11T22:58:56,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:58:56,771 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:58:56,772 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-11T22:58:56,773 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:58:56,773 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T22:58:56,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 241 msec 2024-12-11T22:58:56,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T22:58:56,855 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T22:58:56,866 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:56,868 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:56,869 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-11T22:58:56,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 
2024-12-11T22:58:56,872 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:56,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,878 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,884 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:56,886 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957936886 (current time:1733957936886). 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@385ef224, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:56,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:56,888 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:56,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:56,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:56,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f03f4e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T22:58:56,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:56,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:56,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,890 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:56,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a699db4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:56,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:56,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,892 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:56,893 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:58:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,894 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6701ff50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:56,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:56,896 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:56,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:56,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:56,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@365b71c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:56,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:56,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,897 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:56,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33367f18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:56,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:56,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,900 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:56,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:58:56,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:56,902 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:56,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:58:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:56,904 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T22:58:56,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T22:58:56,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T22:58:56,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T22:58:56,906 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:58:56,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T22:58:56,907 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:58:56,909 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:58:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741976_1152 (size=156) 2024-12-11T22:58:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741976_1152 (size=156) 2024-12-11T22:58:56,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741976_1152 (size=156) 2024-12-11T22:58:56,918 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:58:56,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26}] 2024-12-11T22:58:56,919 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:56,919 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:57,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T22:58:57,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-11T22:58:57,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-11T22:58:57,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:58:57,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:57,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing bebe8ade099f68470b70428d8bb439d5 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-11T22:58:57,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 7864551aa5bf1a578798812c8017df26 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-11T22:58:57,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 is 71, key is 0565ebde5ac1d440cd59de0b8d51c52d/cf:q/1733957936866/Put/seqid=0 2024-12-11T22:58:57,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 is 71, key is 1853563f23435d6dad1a8c2bdccb3a94/cf:q/1733957936868/Put/seqid=0 2024-12-11T22:58:57,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741978_1154 (size=7891) 2024-12-11T22:58:57,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741977_1153 (size=5381) 2024-12-11T22:58:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741978_1154 (size=7891) 2024-12-11T22:58:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741977_1153 (size=5381) 2024-12-11T22:58:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741977_1153 (size=5381) 2024-12-11T22:58:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to 
blk_1073741978_1154 (size=7891) 2024-12-11T22:58:57,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:57,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:57,119 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:57,119 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:57,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/.tmp/cf/19eb120219f644ad8d34cf7ab422fc17, store: [table=testtb-testExportWithResetTtl family=cf region=bebe8ade099f68470b70428d8bb439d5] 2024-12-11T22:58:57,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/.tmp/cf/38f41a6ef8d24f3daf1d0e6b70266dec, store: [table=testtb-testExportWithResetTtl family=cf region=7864551aa5bf1a578798812c8017df26] 2024-12-11T22:58:57,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/.tmp/cf/38f41a6ef8d24f3daf1d0e6b70266dec is 206, key is 18aeafb58b5fdebb25eb7e2834a2101a7/cf:q/1733957936868/Put/seqid=0 2024-12-11T22:58:57,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/.tmp/cf/19eb120219f644ad8d34cf7ab422fc17 is 206, key is 0a77acee85458fdb41b27fbfb82edb84d/cf:q/1733957936866/Put/seqid=0 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741980_1156 (size=6720) 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741980_1156 (size=6720) 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741980_1156 (size=6720) 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741979_1155 (size=14045) 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741979_1155 (size=14045) 2024-12-11T22:58:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741979_1155 (size=14045) 2024-12-11T22:58:57,130 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/.tmp/cf/19eb120219f644ad8d34cf7ab422fc17 2024-12-11T22:58:57,131 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/.tmp/cf/38f41a6ef8d24f3daf1d0e6b70266dec 2024-12-11T22:58:57,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/.tmp/cf/19eb120219f644ad8d34cf7ab422fc17 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17 2024-12-11T22:58:57,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/.tmp/cf/38f41a6ef8d24f3daf1d0e6b70266dec as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec 2024-12-11T22:58:57,143 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17, entries=7, sequenceid=6, filesize=6.6 K 2024-12-11T22:58:57,143 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec, entries=43, sequenceid=6, filesize=13.7 K 2024-12-11T22:58:57,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for bebe8ade099f68470b70428d8bb439d5 in 72ms, sequenceid=6, compaction requested=false 2024-12-11T22:58:57,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 7864551aa5bf1a578798812c8017df26 in 72ms, sequenceid=6, compaction requested=false 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for bebe8ade099f68470b70428d8bb439d5: 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 7864551aa5bf1a578798812c8017df26: 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. for snaptb0-testExportWithResetTtl completed. 2024-12-11T22:58:57,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. for snaptb0-testExportWithResetTtl completed. 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec] hfiles 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17] hfiles 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17 for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741982_1158 (size=107) 2024-12-11T22:58:57,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741981_1157 (size=107) 2024-12-11T22:58:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741982_1158 (size=107) 2024-12-11T22:58:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741981_1157 (size=107) 2024-12-11T22:58:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741982_1158 (size=107) 2024-12-11T22:58:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741981_1157 (size=107) 2024-12-11T22:58:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 
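The trace above follows SnapshotProcedure pid=71 flushing both regions of testtb-testExportWithResetTtl and recording their hfile references for snaptb0-testExportWithResetTtl. For orientation, a minimal client-side sketch of the Admin call that requests a FLUSH-type snapshot like this one (connection setup and class name are illustrative; this is not the test's own code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        // Uses whatever hbase-site.xml is on the classpath to reach the cluster.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot first flushes each region's memstore, then records
          // references to the flushed hfiles -- the SNAPSHOT_* states in the log above.
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }
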
2024-12-11T22:58:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:58:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-11T22:58:57,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-11T22:58:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-11T22:58:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-11T22:58:57,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:57,156 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:57,156 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:57,156 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:57,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7864551aa5bf1a578798812c8017df26 in 239 msec 2024-12-11T22:58:57,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=71 2024-12-11T22:58:57,159 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:58:57,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bebe8ade099f68470b70428d8bb439d5 in 239 msec 2024-12-11T22:58:57,160 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:58:57,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:58:57,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:58:57,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:57,162 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5] hfiles 2024-12-11T22:58:57,162 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 2024-12-11T22:58:57,162 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:58:57,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741983_1159 (size=291) 2024-12-11T22:58:57,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741983_1159 (size=291) 2024-12-11T22:58:57,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741983_1159 (size=291) 2024-12-11T22:58:57,169 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:58:57,169 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,170 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741984_1160 (size=951) 2024-12-11T22:58:57,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741984_1160 (size=951) 2024-12-11T22:58:57,178 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741984_1160 (size=951) 2024-12-11T22:58:57,180 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:58:57,186 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:58:57,186 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-11T22:58:57,187 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:58:57,187 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T22:58:57,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 282 msec 2024-12-11T22:58:57,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T22:58:57,225 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T22:58:57,226 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:58:57,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-11T22:58:57,227 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:58:57,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-11T22:58:57,228 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:58:57,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T22:58:57,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741985_1161 (size=433) 2024-12-11T22:58:57,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741985_1161 (size=433) 2024-12-11T22:58:57,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741985_1161 (size=433) 2024-12-11T22:58:57,238 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f655ccde0c73fa3cfc29fca4186b0959, NAME => 'testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:57,238 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e79b5789ee005872549f41003f60fa27, NAME => 'testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:57,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741986_1162 (size=58) 2024-12-11T22:58:57,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741986_1162 (size=58) 2024-12-11T22:58:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741987_1163 (size=58) 2024-12-11T22:58:57,247 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741986_1162 (size=58) 2024-12-11T22:58:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741987_1163 (size=58) 2024-12-11T22:58:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741987_1163 (size=58) 2024-12-11T22:58:57,247 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:57,247 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing f655ccde0c73fa3cfc29fca4186b0959, disabling compactions & flushes 2024-12-11T22:58:57,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. after waiting 0 ms 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for f655ccde0c73fa3cfc29fca4186b0959: Waiting for close lock at 1733957937247Disabling compacts and flushes for region at 1733957937247Disabling writes for close at 1733957937248 (+1 ms)Writing region close event to WAL at 1733957937248Closed at 1733957937248 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing e79b5789ee005872549f41003f60fa27, disabling compactions & flushes 2024-12-11T22:58:57,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 
after waiting 0 ms 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for e79b5789ee005872549f41003f60fa27: Waiting for close lock at 1733957937248Disabling compacts and flushes for region at 1733957937248Disabling writes for close at 1733957937248Writing region close event to WAL at 1733957937248Closed at 1733957937248 2024-12-11T22:58:57,250 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:58:57,250 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733957937250"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957937250"}]},"ts":"1733957937250"} 2024-12-11T22:58:57,250 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733957937250"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957937250"}]},"ts":"1733957937250"} 2024-12-11T22:58:57,253 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
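The surrounding entries trace CreateTableProcedure pid=74 for 'testExportWithResetTtl': one MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at '1'. A rough sketch of an equivalent createTable call, assuming an already-open Admin handle (illustrative only, not the test's code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      // 'admin' is assumed to be an already-open org.apache.hadoop.hbase.client.Admin.
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is written to a MOB file
                .setMaxVersions(1)     // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
        // A single split point at '1' yields the two regions seen in the log:
        // [''..'1') and ['1'..'').
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }
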
2024-12-11T22:58:57,254 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:58:57,254 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957937254"}]},"ts":"1733957937254"} 2024-12-11T22:58:57,256 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T22:58:57,256 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:58:57,258 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:58:57,258 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:58:57,258 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:58:57,258 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:58:57,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, ASSIGN}] 2024-12-11T22:58:57,260 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, ASSIGN 2024-12-11T22:58:57,260 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, ASSIGN 2024-12-11T22:58:57,261 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T22:58:57,261 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:58:57,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T22:58:57,412 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T22:58:57,412 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=e79b5789ee005872549f41003f60fa27, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:57,412 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=f655ccde0c73fa3cfc29fca4186b0959, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:57,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, ASSIGN because future has completed 2024-12-11T22:58:57,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:58:57,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, ASSIGN because future has completed 2024-12-11T22:58:57,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:58:57,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T22:58:57,569 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,569 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => f655ccde0c73fa3cfc29fca4186b0959, NAME => 'testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:58:57,570 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 
service=AccessControlService 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => e79b5789ee005872549f41003f60fa27, NAME => 'testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:58:57,570 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. service=AccessControlService 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,570 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
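The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor ... AccessController loaded" entries during region open come from the cluster's security coprocessor configuration. A hedged sketch of settings that would load AccessController this way, expressed as programmatic Configuration overrides (the property values shown are the common ones for a secured test cluster and are assumed, not read from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControlConf {
      static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and register the AccessController coprocessor;
        // region-level registration is what exposes AccessControlService on each region.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }
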
2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,570 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:58:57,571 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,571 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,572 INFO [StoreOpener-f655ccde0c73fa3cfc29fca4186b0959-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,572 INFO [StoreOpener-e79b5789ee005872549f41003f60fa27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,573 INFO [StoreOpener-f655ccde0c73fa3cfc29fca4186b0959-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f655ccde0c73fa3cfc29fca4186b0959 columnFamilyName cf 2024-12-11T22:58:57,573 INFO [StoreOpener-e79b5789ee005872549f41003f60fa27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e79b5789ee005872549f41003f60fa27 columnFamilyName cf 2024-12-11T22:58:57,574 DEBUG [StoreOpener-f655ccde0c73fa3cfc29fca4186b0959-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:57,574 DEBUG [StoreOpener-e79b5789ee005872549f41003f60fa27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:57,574 INFO [StoreOpener-f655ccde0c73fa3cfc29fca4186b0959-1 {}] regionserver.HStore(327): Store=f655ccde0c73fa3cfc29fca4186b0959/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:57,574 INFO [StoreOpener-e79b5789ee005872549f41003f60fa27-1 {}] regionserver.HStore(327): Store=e79b5789ee005872549f41003f60fa27/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:58:57,575 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,575 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,576 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,578 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] 
regionserver.HRegion(1093): writing seq id for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,578 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,580 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:57,580 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:58:57,581 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened e79b5789ee005872549f41003f60fa27; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74426778, jitterRate=0.10904541611671448}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:57,580 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened f655ccde0c73fa3cfc29fca4186b0959; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69049929, jitterRate=0.0289241224527359}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:58:57,581 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,581 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:57,581 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for f655ccde0c73fa3cfc29fca4186b0959: Running coprocessor pre-open hook at 1733957937570Writing region info on filesystem at 1733957937570Initializing all the Stores at 1733957937571 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957937571Cleaning up temporary data from old regions at 1733957937576 (+5 ms)Running coprocessor post-open hooks at 1733957937581 (+5 ms)Region opened successfully at 1733957937581 2024-12-11T22:58:57,581 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for e79b5789ee005872549f41003f60fa27: Running coprocessor pre-open hook at 1733957937571Writing region info on filesystem at 1733957937571Initializing all the Stores at 
1733957937571Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957937571Cleaning up temporary data from old regions at 1733957937576 (+5 ms)Running coprocessor post-open hooks at 1733957937581 (+5 ms)Region opened successfully at 1733957937581 2024-12-11T22:58:57,582 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959., pid=77, masterSystemTime=1733957937566 2024-12-11T22:58:57,582 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27., pid=78, masterSystemTime=1733957937567 2024-12-11T22:58:57,585 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,585 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:57,586 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=f655ccde0c73fa3cfc29fca4186b0959, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:58:57,586 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,586 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 
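At this point both regions of the new table are reported open by their regionservers (f655ccde0c73fa3cfc29fca4186b0959 on port 37563, e79b5789ee005872549f41003f60fa27 on port 33867). A small sketch of how a client could confirm the assignment, assuming an open Connection (purely illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintAssignments {
      // 'conn' is assumed to be an already-open org.apache.hadoop.hbase.client.Connection.
      static void print(Connection conn) throws IOException {
        try (RegionLocator locator =
            conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Encoded region name plus host,port,startcode of the hosting regionserver.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
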
2024-12-11T22:58:57,588 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=e79b5789ee005872549f41003f60fa27, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:58:57,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:58:57,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:58:57,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-11T22:58:57,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731 in 175 msec 2024-12-11T22:58:57,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, ASSIGN in 334 msec 2024-12-11T22:58:57,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-11T22:58:57,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938 in 178 msec 2024-12-11T22:58:57,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-12-11T22:58:57,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, ASSIGN in 337 msec 2024-12-11T22:58:57,598 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:58:57,598 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957937598"}]},"ts":"1733957937598"} 2024-12-11T22:58:57,600 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T22:58:57,601 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:58:57,601 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-11T22:58:57,604 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T22:58:57,639 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:57,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:57,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:57,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:58:57,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,670 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,670 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,670 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,670 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:58:57,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 442 msec 2024-12-11T22:58:57,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=74 2024-12-11T22:58:57,855 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-11T22:58:57,855 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-11T22:58:57,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:57,858 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:57,859 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,864 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,869 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:57,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:58:57,879 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,881 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-11T22:58:57,881 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 
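
The "writing data to region ... with WAL disabled" messages above are what the region server logs when mutations arrive with the WAL skipped; per mutation this is controlled via Durability, roughly as in the hypothetical sketch below (the row key and value are made up; the cf:q column matches the flush output further down).

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalPutSketch {
  // Hypothetical sketch: a put that skips the WAL, which is what triggers the
  // "Data may be lost in the event of a crash" warning on the region server.
  static void putWithoutWal(Table table) throws IOException {
    Put p = new Put(Bytes.toBytes("some-row"));  // assumed row key
    p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("some-value"));
    p.setDurability(Durability.SKIP_WAL);        // do not write this mutation to the WAL
    table.put(p);
  }
}
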
2024-12-11T22:58:57,882 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:58:57,883 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,888 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,893 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T22:58:57,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T22:58:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957937895 (current time:1733957937895). 2024-12-11T22:58:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T22:58:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:58:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5088bdf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:57,897 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:57,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:57,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:57,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52c77e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:57,898 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:57,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,899 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:57,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ba7f325, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:57,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:57,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:57,902 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:57,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:58:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,903 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8078fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:58:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:58:57,905 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:58:57,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:58:57,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:58:57,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e32282b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:58:57,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:58:57,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,907 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:58:57,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51700415, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:58:57,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:58:57,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:58:57,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:57,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:57,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:58:57,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:58:57,914 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:58:57,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:58:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:58:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:58:57,915 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:58:57,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T22:58:57,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
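
The snapshot request the master logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) is the kind of call a client issues through the public Admin API; a minimal hypothetical sketch follows. Connection setup is assumed, and the ttl=100000 in the log is normally supplied as an extra snapshot property, which this sketch omits to stay within the plain two-argument call.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush-type snapshot of an enabled table: the region flushes and
      // SnapshotRegionProcedure work that follows in the log correspond to this call.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
    }
  }
}
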
2024-12-11T22:58:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T22:58:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T22:58:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T22:58:57,919 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:58:57,920 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:58:57,923 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:58:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741988_1164 (size=143) 2024-12-11T22:58:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741988_1164 (size=143) 2024-12-11T22:58:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741988_1164 (size=143) 2024-12-11T22:58:57,937 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:58:57,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79b5789ee005872549f41003f60fa27}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f655ccde0c73fa3cfc29fca4186b0959}] 2024-12-11T22:58:57,938 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:57,938 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:58,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-12-11T22:58:58,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-11T22:58:58,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-11T22:58:58,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:58:58,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:58:58,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing e79b5789ee005872549f41003f60fa27 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T22:58:58,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing f655ccde0c73fa3cfc29fca4186b0959 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T22:58:58,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 is 71, key is 02e58d184f19c15ad50bcfc2f971a929/cf:q/1733957937876/Put/seqid=0 2024-12-11T22:58:58,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 is 71, key is 112f543cf688bf2072381d400ed37741/cf:q/1733957937878/Put/seqid=0 2024-12-11T22:58:58,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741989_1165 (size=5102) 2024-12-11T22:58:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741989_1165 (size=5102) 2024-12-11T22:58:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741989_1165 (size=5102) 2024-12-11T22:58:58,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741990_1166 (size=8171) 2024-12-11T22:58:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741990_1166 (size=8171) 2024-12-11T22:58:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741990_1166 (size=8171) 2024-12-11T22:58:58,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:58,127 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:58,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/.tmp/cf/59376c162ede4703af00cce86112213c, store: [table=testExportWithResetTtl family=cf region=e79b5789ee005872549f41003f60fa27] 2024-12-11T22:58:58,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/.tmp/cf/59376c162ede4703af00cce86112213c is 199, key is 0079f563131c01b8fe291ed267b6a0ab4/cf:q/1733957937876/Put/seqid=0 2024-12-11T22:58:58,133 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:58,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/.tmp/cf/f54b58f3ff1349cba0aead6875a590f6, store: [table=testExportWithResetTtl family=cf region=f655ccde0c73fa3cfc29fca4186b0959] 2024-12-11T22:58:58,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/.tmp/cf/f54b58f3ff1349cba0aead6875a590f6 is 199, key is 182d69be93436262cfb6909d68d198abf/cf:q/1733957937878/Put/seqid=0 2024-12-11T22:58:58,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741991_1167 (size=5878) 2024-12-11T22:58:58,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741991_1167 (size=5878) 2024-12-11T22:58:58,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741991_1167 (size=5878) 2024-12-11T22:58:58,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/.tmp/cf/59376c162ede4703af00cce86112213c 2024-12-11T22:58:58,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741992_1168 (size=14517) 2024-12-11T22:58:58,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741992_1168 (size=14517) 2024-12-11T22:58:58,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/.tmp/cf/59376c162ede4703af00cce86112213c as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c 2024-12-11T22:58:58,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741992_1168 (size=14517) 2024-12-11T22:58:58,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/.tmp/cf/f54b58f3ff1349cba0aead6875a590f6 2024-12-11T22:58:58,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/.tmp/cf/f54b58f3ff1349cba0aead6875a590f6 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6 2024-12-11T22:58:58,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c, entries=3, sequenceid=5, filesize=5.7 K 2024-12-11T22:58:58,153 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for e79b5789ee005872549f41003f60fa27 in 63ms, sequenceid=5, compaction requested=false 2024-12-11T22:58:58,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-11T22:58:58,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for e79b5789ee005872549f41003f60fa27: 2024-12-11T22:58:58,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. for snaptb-testExportWithResetTtl completed. 2024-12-11T22:58:58,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T22:58:58,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:58,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c] hfiles 2024-12-11T22:58:58,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c for snapshot=snaptb-testExportWithResetTtl 2024-12-11T22:58:58,158 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6, entries=47, sequenceid=5, filesize=14.2 K 2024-12-11T22:58:58,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for f655ccde0c73fa3cfc29fca4186b0959 in 68ms, sequenceid=5, compaction requested=false 2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for f655ccde0c73fa3cfc29fca4186b0959: 
2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. for snaptb-testExportWithResetTtl completed. 2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6] hfiles 2024-12-11T22:58:58,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6 for snapshot=snaptb-testExportWithResetTtl 2024-12-11T22:58:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741993_1169 (size=100) 2024-12-11T22:58:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741993_1169 (size=100) 2024-12-11T22:58:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741993_1169 (size=100) 2024-12-11T22:58:58,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 
2024-12-11T22:58:58,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-11T22:58:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-11T22:58:58,162 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:58,162 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:58,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e79b5789ee005872549f41003f60fa27 in 226 msec 2024-12-11T22:58:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741994_1170 (size=100) 2024-12-11T22:58:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741994_1170 (size=100) 2024-12-11T22:58:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741994_1170 (size=100) 2024-12-11T22:58:58,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 
2024-12-11T22:58:58,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-11T22:58:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-11T22:58:58,167 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:58,167 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:58,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-11T22:58:58,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f655ccde0c73fa3cfc29fca4186b0959 in 231 msec 2024-12-11T22:58:58,170 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:58:58,170 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:58:58,171 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:58:58,171 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:58:58,171 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:58:58,172 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27] hfiles 2024-12-11T22:58:58,172 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:58:58,172 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 2024-12-11T22:58:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741995_1171 (size=284) 2024-12-11T22:58:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741995_1171 (size=284) 2024-12-11T22:58:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741995_1171 (size=284) 2024-12-11T22:58:58,180 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:58:58,180 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-11T22:58:58,180 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T22:58:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741996_1172 (size=923) 2024-12-11T22:58:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741996_1172 (size=923) 2024-12-11T22:58:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36751 is added to blk_1073741996_1172 (size=923) 2024-12-11T22:58:58,196 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:58:58,202 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:58:58,203 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T22:58:58,204 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:58:58,204 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T22:58:58,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 288 msec 2024-12-11T22:58:58,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T22:58:58,235 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-11T22:58:58,244 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244 2024-12-11T22:58:58,244 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:58,269 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:58:58,269 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T22:58:58,270 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T22:58:58,275 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T22:58:58,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741997_1173 (size=143) 2024-12-11T22:58:58,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741997_1173 (size=143) 2024-12-11T22:58:58,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741997_1173 (size=143) 2024-12-11T22:58:58,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741998_1174 (size=923) 2024-12-11T22:58:58,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741998_1174 (size=923) 2024-12-11T22:58:58,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741998_1174 (size=923) 2024-12-11T22:58:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741999_1175 (size=141) 2024-12-11T22:58:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741999_1175 (size=141) 2024-12-11T22:58:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741999_1175 (size=141) 2024-12-11T22:58:58,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:58,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:58,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-15141556740580147153.jar 2024-12-11T22:58:59,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-9723622340012482178.jar 2024-12-11T22:58:59,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:58:59,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T22:58:59,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T22:58:59,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T22:58:59,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T22:58:59,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T22:58:59,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T22:58:59,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:59,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:59,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-12-11T22:58:59,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:59,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:58:59,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:58:59,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:58:59,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742000_1176 (size=131440) 2024-12-11T22:58:59,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742000_1176 (size=131440) 2024-12-11T22:58:59,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742000_1176 (size=131440) 2024-12-11T22:58:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T22:58:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T22:58:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T22:58:59,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T22:58:59,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T22:58:59,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T22:58:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742003_1179 (size=903541) 2024-12-11T22:58:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742003_1179 (size=903541) 2024-12-11T22:58:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742003_1179 (size=903541) 2024-12-11T22:58:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742004_1180 (size=8360360) 
2024-12-11T22:58:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742004_1180 (size=8360360) 2024-12-11T22:58:59,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742004_1180 (size=8360360) 2024-12-11T22:58:59,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742005_1181 (size=1877034) 2024-12-11T22:58:59,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742005_1181 (size=1877034) 2024-12-11T22:58:59,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742005_1181 (size=1877034) 2024-12-11T22:58:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742006_1182 (size=77835) 2024-12-11T22:58:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742006_1182 (size=77835) 2024-12-11T22:58:59,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742006_1182 (size=77835) 2024-12-11T22:58:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742007_1183 (size=30949) 2024-12-11T22:58:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742007_1183 (size=30949) 2024-12-11T22:58:59,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742007_1183 (size=30949) 2024-12-11T22:58:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742008_1184 (size=6426222) 2024-12-11T22:58:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742008_1184 (size=6426222) 2024-12-11T22:58:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742008_1184 (size=6426222) 2024-12-11T22:58:59,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T22:58:59,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T22:58:59,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T22:58:59,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742010_1186 (size=4695811) 2024-12-11T22:58:59,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742010_1186 (size=4695811) 2024-12-11T22:58:59,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742010_1186 
(size=4695811) 2024-12-11T22:58:59,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742011_1187 (size=232957) 2024-12-11T22:58:59,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742011_1187 (size=232957) 2024-12-11T22:58:59,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742011_1187 (size=232957) 2024-12-11T22:58:59,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742012_1188 (size=127628) 2024-12-11T22:58:59,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742012_1188 (size=127628) 2024-12-11T22:58:59,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742012_1188 (size=127628) 2024-12-11T22:58:59,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742013_1189 (size=20406) 2024-12-11T22:58:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742013_1189 (size=20406) 2024-12-11T22:58:59,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742013_1189 (size=20406) 2024-12-11T22:58:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742014_1190 (size=5175431) 2024-12-11T22:58:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742014_1190 (size=5175431) 2024-12-11T22:58:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742014_1190 (size=5175431) 2024-12-11T22:58:59,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742015_1191 (size=217634) 2024-12-11T22:58:59,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742015_1191 (size=217634) 2024-12-11T22:58:59,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742015_1191 (size=217634) 2024-12-11T22:58:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742016_1192 (size=1832290) 2024-12-11T22:58:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742016_1192 (size=1832290) 2024-12-11T22:58:59,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742016_1192 (size=1832290) 2024-12-11T22:58:59,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742017_1193 (size=443172) 2024-12-11T22:58:59,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to 
blk_1073742017_1193 (size=443172) 2024-12-11T22:58:59,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742017_1193 (size=443172) 2024-12-11T22:58:59,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742018_1194 (size=322274) 2024-12-11T22:58:59,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742018_1194 (size=322274) 2024-12-11T22:58:59,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742018_1194 (size=322274) 2024-12-11T22:58:59,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742019_1195 (size=503880) 2024-12-11T22:58:59,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742019_1195 (size=503880) 2024-12-11T22:58:59,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742019_1195 (size=503880) 2024-12-11T22:58:59,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742020_1196 (size=29229) 2024-12-11T22:58:59,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742020_1196 (size=29229) 2024-12-11T22:58:59,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742020_1196 (size=29229) 2024-12-11T22:58:59,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742021_1197 (size=24096) 2024-12-11T22:58:59,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742021_1197 (size=24096) 2024-12-11T22:58:59,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742021_1197 (size=24096) 2024-12-11T22:58:59,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742022_1198 (size=111872) 2024-12-11T22:58:59,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742022_1198 (size=111872) 2024-12-11T22:58:59,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742022_1198 (size=111872) 2024-12-11T22:58:59,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742023_1199 (size=45609) 2024-12-11T22:58:59,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742023_1199 (size=45609) 2024-12-11T22:58:59,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742023_1199 (size=45609) 2024-12-11T22:58:59,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to 
blk_1073742024_1200 (size=136454) 2024-12-11T22:58:59,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742024_1200 (size=136454) 2024-12-11T22:58:59,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742024_1200 (size=136454) 2024-12-11T22:58:59,597 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T22:58:59,599 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-11T22:58:59,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-11T22:58:59,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T22:58:59,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.7 K 2024-12-11T22:58:59,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T22:58:59,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742025_1201 (size=995) 2024-12-11T22:58:59,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742025_1201 (size=995) 2024-12-11T22:58:59,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742025_1201 (size=995) 2024-12-11T22:58:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742026_1202 (size=35) 2024-12-11T22:58:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742026_1202 (size=35) 2024-12-11T22:58:59,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742026_1202 (size=35) 2024-12-11T22:58:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742027_1203 (size=303992) 2024-12-11T22:58:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742027_1203 (size=303992) 2024-12-11T22:58:59,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742027_1203 (size=303992) 2024-12-11T22:58:59,732 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:58:59,732 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T22:58:59,735 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0002_000001 (auth:SIMPLE) from 127.0.0.1:47110 2024-12-11T22:58:59,743 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000001/launch_container.sh] 2024-12-11T22:58:59,744 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000001/container_tokens] 2024-12-11T22:58:59,744 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0002/container_1733957780200_0002_01_000001/sysfs] 2024-12-11T22:59:00,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T22:59:00,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T22:59:00,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-11T22:59:00,120 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T22:59:00,121 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T22:59:00,609 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:45160 2024-12-11T22:59:01,031 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:59:06,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:44606 2024-12-11T22:59:06,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742028_1204 (size=349690) 2024-12-11T22:59:06,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742028_1204 (size=349690) 
2024-12-11T22:59:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742028_1204 (size=349690) 2024-12-11T22:59:08,448 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:56142 2024-12-11T22:59:08,450 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:36064 2024-12-11T22:59:08,684 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T22:59:09,185 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:56148 2024-12-11T22:59:09,190 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:36072 2024-12-11T22:59:11,737 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T22:59:14,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742029_1205 (size=14517) 2024-12-11T22:59:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742029_1205 (size=14517) 2024-12-11T22:59:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742029_1205 (size=14517) 2024-12-11T22:59:15,027 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000002/launch_container.sh] 2024-12-11T22:59:15,027 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000002/container_tokens] 2024-12-11T22:59:15,027 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000002/sysfs] 2024-12-11T22:59:15,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742031_1207 (size=5878) 2024-12-11T22:59:15,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742031_1207 (size=5878) 2024-12-11T22:59:15,498 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742031_1207 (size=5878) 2024-12-11T22:59:15,704 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000004/launch_container.sh] 2024-12-11T22:59:15,704 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000004/container_tokens] 2024-12-11T22:59:15,704 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000004/sysfs] 2024-12-11T22:59:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742032_1208 (size=8171) 2024-12-11T22:59:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742032_1208 (size=8171) 2024-12-11T22:59:16,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742032_1208 (size=8171) 2024-12-11T22:59:16,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742033_1209 (size=5102) 2024-12-11T22:59:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742033_1209 (size=5102) 2024-12-11T22:59:16,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742033_1209 (size=5102) 2024-12-11T22:59:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742030_1206 (size=31705) 2024-12-11T22:59:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742030_1206 (size=31705) 2024-12-11T22:59:16,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742030_1206 (size=31705) 2024-12-11T22:59:16,927 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000003/launch_container.sh] 2024-12-11T22:59:16,927 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000003/container_tokens] 2024-12-11T22:59:16,927 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000003/sysfs] 2024-12-11T22:59:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742034_1210 (size=462) 2024-12-11T22:59:16,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742034_1210 (size=462) 2024-12-11T22:59:16,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742034_1210 (size=462) 2024-12-11T22:59:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742035_1211 (size=31705) 2024-12-11T22:59:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742035_1211 (size=31705) 2024-12-11T22:59:16,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742035_1211 (size=31705) 2024-12-11T22:59:16,988 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000005/launch_container.sh] 2024-12-11T22:59:16,988 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000005/container_tokens] 2024-12-11T22:59:16,988 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000005/sysfs] 2024-12-11T22:59:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742036_1212 (size=349690) 2024-12-11T22:59:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742036_1212 (size=349690) 2024-12-11T22:59:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742036_1212 (size=349690) 
2024-12-11T22:59:17,001 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:56154 2024-12-11T22:59:17,006 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:56158 2024-12-11T22:59:18,840 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T22:59:18,841 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T22:59:18,852 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-11T22:59:18,852 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T22:59:18,853 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T22:59:18,853 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T22:59:18,854 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T22:59:18,854 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T22:59:18,854 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T22:59:18,854 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T22:59:18,854 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957938244/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T22:59:18,870 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-11T22:59:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 
2024-12-11T22:59:18,874 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957958874"}]},"ts":"1733957958874"} 2024-12-11T22:59:18,877 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-11T22:59:18,877 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-11T22:59:18,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-11T22:59:18,880 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, UNASSIGN}] 2024-12-11T22:59:18,882 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, UNASSIGN 2024-12-11T22:59:18,882 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, UNASSIGN 2024-12-11T22:59:18,883 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=e79b5789ee005872549f41003f60fa27, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:18,883 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=f655ccde0c73fa3cfc29fca4186b0959, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:59:18,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, UNASSIGN because future has completed 2024-12-11T22:59:18,886 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:59:18,886 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:59:18,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, UNASSIGN because future has completed 2024-12-11T22:59:18,887 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 
2024-12-11T22:59:18,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:59:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T22:59:19,039 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close e79b5789ee005872549f41003f60fa27 2024-12-11T22:59:19,039 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:19,039 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing e79b5789ee005872549f41003f60fa27, disabling compactions & flushes 2024-12-11T22:59:19,039 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:59:19,039 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. after waiting 0 ms 2024-12-11T22:59:19,040 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing f655ccde0c73fa3cfc29fca4186b0959, disabling compactions & flushes 2024-12-11T22:59:19,040 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 
after waiting 0 ms 2024-12-11T22:59:19,040 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 2024-12-11T22:59:19,047 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T22:59:19,047 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T22:59:19,048 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:19,048 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:19,048 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27. 2024-12-11T22:59:19,048 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959. 
2024-12-11T22:59:19,048 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for e79b5789ee005872549f41003f60fa27: Waiting for close lock at 1733957959039Running coprocessor pre-close hooks at 1733957959039Disabling compacts and flushes for region at 1733957959039Disabling writes for close at 1733957959040 (+1 ms)Writing region close event to WAL at 1733957959041 (+1 ms)Running coprocessor post-close hooks at 1733957959048 (+7 ms)Closed at 1733957959048 2024-12-11T22:59:19,048 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for f655ccde0c73fa3cfc29fca4186b0959: Waiting for close lock at 1733957959040Running coprocessor pre-close hooks at 1733957959040Disabling compacts and flushes for region at 1733957959040Disabling writes for close at 1733957959040Writing region close event to WAL at 1733957959042 (+2 ms)Running coprocessor post-close hooks at 1733957959048 (+6 ms)Closed at 1733957959048 2024-12-11T22:59:19,051 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed e79b5789ee005872549f41003f60fa27 2024-12-11T22:59:19,052 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=e79b5789ee005872549f41003f60fa27, regionState=CLOSED 2024-12-11T22:59:19,052 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:59:19,053 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=f655ccde0c73fa3cfc29fca4186b0959, regionState=CLOSED 2024-12-11T22:59:19,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:59:19,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:59:19,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-11T22:59:19,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-11T22:59:19,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure e79b5789ee005872549f41003f60fa27, server=b762025f20c5,33867,1733957770938 in 170 msec 2024-12-11T22:59:19,059 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure f655ccde0c73fa3cfc29fca4186b0959, server=b762025f20c5,37563,1733957770731 in 169 msec 2024-12-11T22:59:19,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e79b5789ee005872549f41003f60fa27, UNASSIGN in 178 msec 2024-12-11T22:59:19,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=85, resume processing ppid=83 2024-12-11T22:59:19,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=f655ccde0c73fa3cfc29fca4186b0959, UNASSIGN in 179 msec 2024-12-11T22:59:19,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-11T22:59:19,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 183 msec 2024-12-11T22:59:19,065 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957959064"}]},"ts":"1733957959064"} 2024-12-11T22:59:19,067 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T22:59:19,067 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-11T22:59:19,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 198 msec 2024-12-11T22:59:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T22:59:19,196 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-11T22:59:19,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-11T22:59:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,198 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-11T22:59:19,200 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,203 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-11T22:59:19,205 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:59:19,205 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27 2024-12-11T22:59:19,207 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/recovered.edits] 2024-12-11T22:59:19,207 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/recovered.edits] 2024-12-11T22:59:19,211 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/cf/59376c162ede4703af00cce86112213c 2024-12-11T22:59:19,211 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/cf/f54b58f3ff1349cba0aead6875a590f6 2024-12-11T22:59:19,214 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/recovered.edits/8.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959/recovered.edits/8.seqid 2024-12-11T22:59:19,214 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/recovered.edits/8.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27/recovered.edits/8.seqid 2024-12-11T22:59:19,214 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:59:19,214 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportWithResetTtl/e79b5789ee005872549f41003f60fa27 2024-12-11T22:59:19,214 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-11T22:59:19,215 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 
2024-12-11T22:59:19,215 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-11T22:59:19,219 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f24622475dff4bf7be8b78cff2a5cc8d_f655ccde0c73fa3cfc29fca4186b0959 2024-12-11T22:59:19,219 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121110df38b5cef44365b5674fcb52b7dfc0_e79b5789ee005872549f41003f60fa27 2024-12-11T22:59:19,220 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-11T22:59:19,222 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,225 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-11T22:59:19,227 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-11T22:59:19,228 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,229 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
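The disable/delete sequence traced above for testExportWithResetTtl (DisableTableProcedure pid=82, then DeleteTableProcedure pid=88) is what the blocking HBase Admin API drives from the client side. A minimal sketch of that teardown, assuming a reachable cluster configuration on the classpath (class name and error handling are illustrative, not the test harness code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableTeardownSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("testExportWithResetTtl");
                if (admin.tableExists(table)) {
                    admin.disableTable(table); // master runs a DisableTableProcedure and blocks until DISABLED
                    admin.deleteTable(table);  // master runs a DeleteTableProcedure (archive, META cleanup, ACL znode removal)
                }
                // Snapshot cleanup, as logged further below for this test's snapshots.
                admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
            }
        }
    }
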
2024-12-11T22:59:19,229 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957959229"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,229 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957959229"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T22:59:19,231 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e79b5789ee005872549f41003f60fa27, NAME => 'testExportWithResetTtl,,1733957937225.e79b5789ee005872549f41003f60fa27.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f655ccde0c73fa3cfc29fca4186b0959, NAME => 'testExportWithResetTtl,1,1733957937225.f655ccde0c73fa3cfc29fca4186b0959.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T22:59:19,231 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-11T22:59:19,231 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957959231"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,233 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-11T22:59:19,234 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T22:59:19,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 38 msec 2024-12-11T22:59:19,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,247 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,247 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-11T22:59:19,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:19,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:19,257 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-11T22:59:19,257 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-11T22:59:19,257 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:19,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:19,258 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-11T22:59:19,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T22:59:19,262 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957959262"}]},"ts":"1733957959262"} 2024-12-11T22:59:19,264 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-11T22:59:19,265 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-11T22:59:19,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-11T22:59:19,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, UNASSIGN}] 2024-12-11T22:59:19,268 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, UNASSIGN 2024-12-11T22:59:19,268 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, UNASSIGN 2024-12-11T22:59:19,269 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=bebe8ade099f68470b70428d8bb439d5, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:59:19,269 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=7864551aa5bf1a578798812c8017df26, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:59:19,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, UNASSIGN because future has completed 2024-12-11T22:59:19,271 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:59:19,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:59:19,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, UNASSIGN because future has completed 2024-12-11T22:59:19,272 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:59:19,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:59:19,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T22:59:19,423 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 7864551aa5bf1a578798812c8017df26 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 7864551aa5bf1a578798812c8017df26, disabling compactions & flushes 2024-12-11T22:59:19,424 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. after waiting 0 ms 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 
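While the CloseRegionProcedures above run, the client-visible signal is the table state moving from DISABLING to DISABLED; the repeated "Checking to see if procedure is done pid=89" lines correspond to the client polling the master for the procedure result. The blocking Admin.disableTable call already waits for this, but a caller using the async admin can poll the state itself; a rough sketch (helper name, timeout, and poll interval are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForDisabledSketch {
        // Poll until the table reaches DISABLED or the deadline passes.
        static void waitUntilDisabled(Admin admin, TableName table, long timeoutMs) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!admin.isTableDisabled(table)) {
                if (System.currentTimeMillis() > deadline) {
                    throw new IllegalStateException(table + " not DISABLED after " + timeoutMs + " ms");
                }
                Thread.sleep(200);
            }
        }
    }
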
2024-12-11T22:59:19,424 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:19,424 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing bebe8ade099f68470b70428d8bb439d5, disabling compactions & flushes 2024-12-11T22:59:19,424 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:59:19,425 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:59:19,425 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. after waiting 0 ms 2024-12-11T22:59:19,425 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:59:19,427 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region bebe8ade099f68470b70428d8bb439d5 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:59:19,427 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7864551aa5bf1a578798812c8017df26 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:59:19,428 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:59:19,429 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:19,429 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:59:19,429 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26. 
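The WALSplitUtil lines above record a "<seqid>.seqid" marker being written under each closing region's recovered.edits directory, preserving the region's max sequence id for the next open. A small read-only sketch for inspecting that directory over HDFS (the region directory argument is supplied by the caller, e.g. one of the paths in the log; class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsSketch {
        // Usage: pass a region directory such as
        // hdfs://localhost:45807/.../data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26
        public static void main(String[] args) throws Exception {
            Path regionDir = new Path(args[0]);
            FileSystem fs = regionDir.getFileSystem(new Configuration());
            for (FileStatus f : fs.listStatus(new Path(regionDir, "recovered.edits"))) {
                System.out.println(f.getPath().getName() + " (" + f.getLen() + " bytes)");
            }
        }
    }
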
2024-12-11T22:59:19,429 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 7864551aa5bf1a578798812c8017df26: Waiting for close lock at 1733957959424Running coprocessor pre-close hooks at 1733957959424Disabling compacts and flushes for region at 1733957959424Disabling writes for close at 1733957959424Writing region close event to WAL at 1733957959425 (+1 ms)Running coprocessor post-close hooks at 1733957959429 (+4 ms)Closed at 1733957959429 2024-12-11T22:59:19,430 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:19,430 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5. 2024-12-11T22:59:19,430 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for bebe8ade099f68470b70428d8bb439d5: Waiting for close lock at 1733957959424Running coprocessor pre-close hooks at 1733957959424Disabling compacts and flushes for region at 1733957959424Disabling writes for close at 1733957959425 (+1 ms)Writing region close event to WAL at 1733957959425Running coprocessor post-close hooks at 1733957959430 (+5 ms)Closed at 1733957959430 2024-12-11T22:59:19,432 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 7864551aa5bf1a578798812c8017df26 2024-12-11T22:59:19,432 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=7864551aa5bf1a578798812c8017df26, regionState=CLOSED 2024-12-11T22:59:19,432 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:59:19,433 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=bebe8ade099f68470b70428d8bb439d5, regionState=CLOSED 2024-12-11T22:59:19,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:59:19,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:59:19,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-11T22:59:19,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 7864551aa5bf1a578798812c8017df26, server=b762025f20c5,41635,1733957771023 in 164 msec 2024-12-11T22:59:19,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-11T22:59:19,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7864551aa5bf1a578798812c8017df26, UNASSIGN in 169 msec 2024-12-11T22:59:19,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure bebe8ade099f68470b70428d8bb439d5, server=b762025f20c5,37563,1733957770731 in 164 msec 2024-12-11T22:59:19,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-12-11T22:59:19,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=bebe8ade099f68470b70428d8bb439d5, UNASSIGN in 170 msec 2024-12-11T22:59:19,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-11T22:59:19,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 174 msec 2024-12-11T22:59:19,442 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957959442"}]},"ts":"1733957959442"} 2024-12-11T22:59:19,444 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T22:59:19,444 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-11T22:59:19,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 186 msec 2024-12-11T22:59:19,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T22:59:19,575 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T22:59:19,575 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-11T22:59:19,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,578 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-11T22:59:19,579 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,582 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-11T22:59:19,584 
DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:59:19,584 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26 2024-12-11T22:59:19,586 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/recovered.edits] 2024-12-11T22:59:19,586 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/recovered.edits] 2024-12-11T22:59:19,602 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/cf/38f41a6ef8d24f3daf1d0e6b70266dec 2024-12-11T22:59:19,602 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/cf/19eb120219f644ad8d34cf7ab422fc17 2024-12-11T22:59:19,608 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5/recovered.edits/9.seqid 2024-12-11T22:59:19,608 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26/recovered.edits/9.seqid 2024-12-11T22:59:19,608 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:59:19,608 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithResetTtl/7864551aa5bf1a578798812c8017df26 2024-12-11T22:59:19,609 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-11T22:59:19,609 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-11T22:59:19,610 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-11T22:59:19,616 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202412113234f0dfa15b48689fdb51292c2400c3_7864551aa5bf1a578798812c8017df26 2024-12-11T22:59:19,616 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211b63fe11af701407bb5e0cb78e9e8ed5a_bebe8ade099f68470b70428d8bb439d5 2024-12-11T22:59:19,617 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-11T22:59:19,619 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,622 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-11T22:59:19,625 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-11T22:59:19,626 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,626 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 
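The Delete records just below show the two kinds of hbase:meta rows the procedure removes: one "info" row per region, keyed "<table>,<startkey>,<regionId>.<encodedName>.", and a single "table:state" row keyed by the table name. A read-only way to list a table's per-region rows (prefix scan only; clients should never write hbase:meta directly, that is the master's job; class and method names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRegionRowsSketch {
        // Print the row keys of a table's region entries in hbase:meta.
        static void printRegionRows(Connection conn, String tableName) throws Exception {
            Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes(tableName + ","));
            try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
                 ResultScanner rs = meta.getScanner(scan)) {
                for (Result r : rs) {
                    System.out.println(Bytes.toString(r.getRow()));
                }
            }
        }
    }

In practice Admin.getRegions(TableName) returns the same region list without scanning hbase:meta by hand; the scan is shown only to make the row-key layout in the log concrete.
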
2024-12-11T22:59:19,626 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957959626"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,627 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957959626"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,629 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T22:59:19,629 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => bebe8ade099f68470b70428d8bb439d5, NAME => 'testtb-testExportWithResetTtl,,1733957935848.bebe8ade099f68470b70428d8bb439d5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7864551aa5bf1a578798812c8017df26, NAME => 'testtb-testExportWithResetTtl,1,1733957935848.7864551aa5bf1a578798812c8017df26.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T22:59:19,629 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-11T22:59:19,630 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957959629"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:19,632 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-11T22:59:19,633 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T22:59:19,634 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 58 msec 2024-12-11T22:59:19,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T22:59:19,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-11T22:59:19,660 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-11T22:59:19,661 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T22:59:19,670 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T22:59:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-11T22:59:19,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-11T22:59:19,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-11T22:59:19,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T22:59:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-11T22:59:19,699 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=797 (was 787) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_914922652_1 at /127.0.0.1:49912 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40997 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3161 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 120546) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_914922652_1 at /127.0.0.1:50480 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:48518 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:35682 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:40997 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:33209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:33209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:52352 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 789) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=710 (was 851), ProcessCount=15 (was 17), AvailableMemoryMB=3982 (was 4099) 2024-12-11T22:59:19,700 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-11T22:59:19,718 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=797, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=710, ProcessCount=15, AvailableMemoryMB=3984 2024-12-11T22:59:19,718 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-11T22:59:19,719 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:59:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:19,721 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:59:19,721 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-11T22:59:19,722 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:59:19,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T22:59:19,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742037_1213 (size=443) 2024-12-11T22:59:19,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742037_1213 (size=443) 2024-12-11T22:59:19,728 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742037_1213 (size=443) 2024-12-11T22:59:19,730 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8354263e3cc4eb0ec0ad4ea34c4e06c4, NAME => 'testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:19,731 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => af0dc4d9b6d0a60003281d7a4a7fd8a4, NAME => 'testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742038_1214 (size=68) 2024-12-11T22:59:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742038_1214 (size=68) 2024-12-11T22:59:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742039_1215 (size=68) 2024-12-11T22:59:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742038_1214 (size=68) 2024-12-11T22:59:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742039_1215 (size=68) 2024-12-11T22:59:19,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742039_1215 (size=68) 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated 
testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 8354263e3cc4eb0ec0ad4ea34c4e06c4, disabling compactions & flushes 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing af0dc4d9b6d0a60003281d7a4a7fd8a4, disabling compactions & flushes 2024-12-11T22:59:19,738 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:19,738 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. after waiting 0 ms 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. after waiting 0 ms 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:19,738 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:19,738 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 
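[editor's note] The create request and CreateTableProcedure entries above correspond, on the client side, to roughly the following Admin API call. This is a minimal sketch, not the test's actual code: the column-family settings (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and the single split point '1' are taken from the logged descriptor and region boundaries, while the class name and connection handling are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)       // VERSIONS => '1'
              .setMobEnabled(true)     // IS_MOB => 'true'
              .setMobThreshold(0L);    // MOB_THRESHOLD => '0': every cell goes to MOB files
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(cf.build());
      // Two regions split at '1', matching the STARTKEY/ENDKEY pairs in the log.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}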
2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8354263e3cc4eb0ec0ad4ea34c4e06c4: Waiting for close lock at 1733957959738Disabling compacts and flushes for region at 1733957959738Disabling writes for close at 1733957959738Writing region close event to WAL at 1733957959738Closed at 1733957959738 2024-12-11T22:59:19,738 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for af0dc4d9b6d0a60003281d7a4a7fd8a4: Waiting for close lock at 1733957959738Disabling compacts and flushes for region at 1733957959738Disabling writes for close at 1733957959738Writing region close event to WAL at 1733957959738Closed at 1733957959738 2024-12-11T22:59:19,740 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:59:19,740 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733957959740"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957959740"}]},"ts":"1733957959740"} 2024-12-11T22:59:19,740 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733957959740"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957959740"}]},"ts":"1733957959740"} 2024-12-11T22:59:19,742 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-11T22:59:19,743 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:59:19,743 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957959743"}]},"ts":"1733957959743"} 2024-12-11T22:59:19,745 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T22:59:19,745 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:59:19,746 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:59:19,746 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:59:19,746 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:59:19,746 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:59:19,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, ASSIGN}] 2024-12-11T22:59:19,747 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, ASSIGN 2024-12-11T22:59:19,747 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, ASSIGN 2024-12-11T22:59:19,748 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T22:59:19,748 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T22:59:19,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T22:59:19,899 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T22:59:19,899 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=af0dc4d9b6d0a60003281d7a4a7fd8a4, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:19,899 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=8354263e3cc4eb0ec0ad4ea34c4e06c4, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:59:19,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, ASSIGN because future has completed 2024-12-11T22:59:19,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:59:19,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, ASSIGN because future has completed 2024-12-11T22:59:19,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:59:20,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T22:59:20,057 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:20,057 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => af0dc4d9b6d0a60003281d7a4a7fd8a4, NAME => 'testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:59:20,057 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 
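[editor's note] Once the two TransitRegionStateProcedure children above complete, both regions are open on the region servers. A client-side check for that state could look like the sketch below; this is an assumed illustration (the test uses its own helpers), showing only the standard Admin calls.

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public final class WaitForTableSketch {
  static void waitForTable(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    // Poll until the master reports every region of the table as open.
    while (!admin.isTableAvailable(table)) {
      Thread.sleep(100);
    }
    List<RegionInfo> regions = admin.getRegions(table);
    // Should report 2, matching "Found 2 regions for table" later in the log.
    System.out.println("regions online: " + regions.size());
  }
}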
2024-12-11T22:59:20,058 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 8354263e3cc4eb0ec0ad4ea34c4e06c4, NAME => 'testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:59:20,058 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. service=AccessControlService 2024-12-11T22:59:20,058 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:59:20,058 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. service=AccessControlService 2024-12-11T22:59:20,058 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,058 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:59:20,058 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,059 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,061 INFO 
[StoreOpener-af0dc4d9b6d0a60003281d7a4a7fd8a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,061 INFO [StoreOpener-8354263e3cc4eb0ec0ad4ea34c4e06c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,063 INFO [StoreOpener-8354263e3cc4eb0ec0ad4ea34c4e06c4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8354263e3cc4eb0ec0ad4ea34c4e06c4 columnFamilyName cf 2024-12-11T22:59:20,063 INFO [StoreOpener-af0dc4d9b6d0a60003281d7a4a7fd8a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af0dc4d9b6d0a60003281d7a4a7fd8a4 columnFamilyName cf 2024-12-11T22:59:20,065 DEBUG [StoreOpener-af0dc4d9b6d0a60003281d7a4a7fd8a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:20,065 DEBUG [StoreOpener-8354263e3cc4eb0ec0ad4ea34c4e06c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:20,066 INFO [StoreOpener-8354263e3cc4eb0ec0ad4ea34c4e06c4-1 {}] regionserver.HStore(327): Store=8354263e3cc4eb0ec0ad4ea34c4e06c4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:59:20,066 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,067 INFO [StoreOpener-af0dc4d9b6d0a60003281d7a4a7fd8a4-1 {}] regionserver.HStore(327): Store=af0dc4d9b6d0a60003281d7a4a7fd8a4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
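[editor's note] The CompactionConfiguration line above prints the effective store-compaction settings for the new column family. As an assumption-laden aside, those values usually come from the hbase-site.xml keys shown below (minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, off-peak ratio=5.0, major period=604800000 ms); the key-to-value mapping is the conventional one, not something read out of this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period (7 days)
    return conf;
  }
}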
2024-12-11T22:59:20,067 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,067 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,067 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,067 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,068 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,068 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,068 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,068 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,068 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,069 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,070 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,071 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:59:20,072 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 8354263e3cc4eb0ec0ad4ea34c4e06c4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65318147, jitterRate=-0.026683762669563293}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:59:20,072 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,072 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:59:20,073 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened af0dc4d9b6d0a60003281d7a4a7fd8a4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74302458, jitterRate=0.10719290375709534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:59:20,073 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 8354263e3cc4eb0ec0ad4ea34c4e06c4: Running coprocessor pre-open hook at 1733957960059Writing region info on filesystem at 1733957960059Initializing all the Stores at 1733957960060 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957960061 (+1 ms)Cleaning up temporary data from old regions at 1733957960068 (+7 ms)Running coprocessor post-open hooks at 1733957960072 (+4 ms)Region opened successfully at 1733957960073 (+1 ms) 2024-12-11T22:59:20,073 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,073 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for af0dc4d9b6d0a60003281d7a4a7fd8a4: Running coprocessor pre-open hook at 1733957960059Writing region info on filesystem at 1733957960059Initializing all the Stores at 1733957960060 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957960060Cleaning up temporary data from old regions at 1733957960068 (+8 ms)Running coprocessor post-open hooks at 1733957960073 (+5 ms)Region opened successfully at 1733957960073 2024-12-11T22:59:20,073 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4., pid=99, masterSystemTime=1733957960053 2024-12-11T22:59:20,074 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, 
pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4., pid=100, masterSystemTime=1733957960054 2024-12-11T22:59:20,076 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:20,076 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:20,076 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=af0dc4d9b6d0a60003281d7a4a7fd8a4, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:20,077 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:20,077 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:20,077 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=8354263e3cc4eb0ec0ad4ea34c4e06c4, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:59:20,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:59:20,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:59:20,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-11T22:59:20,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938 in 180 msec 2024-12-11T22:59:20,085 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, ASSIGN in 337 msec 2024-12-11T22:59:20,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-11T22:59:20,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731 in 181 msec 2024-12-11T22:59:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-11T22:59:20,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, 
ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, ASSIGN in 339 msec 2024-12-11T22:59:20,088 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:59:20,088 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957960088"}]},"ts":"1733957960088"} 2024-12-11T22:59:20,090 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T22:59:20,092 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:59:20,092 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-11T22:59:20,095 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T22:59:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T22:59:20,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T22:59:20,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T22:59:20,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-11T22:59:20,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:20,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:20,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:20,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:20,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data 
PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:20,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:20,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:20,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:20,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 420 msec 2024-12-11T22:59:20,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T22:59:20,345 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T22:59:20,345 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:20,348 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-11T22:59:20,348 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:20,349 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:59:20,350 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:20,356 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:20,362 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:20,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957960366 (current time:1733957960366). 
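[editor's note] The snapshot request logged just above ("{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }") is what a plain Admin.snapshot call produces. A minimal sketch of that client call, assuming nothing beyond the public Admin API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class SnapshotSketch {
  static void takeSnapshot(Admin admin) throws Exception {
    // FLUSH-type snapshot, as shown in the logged request.
    admin.snapshot("emptySnaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
  }
}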
2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a5326e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:20,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:20,368 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:20,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:20,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:20,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61bba5dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:20,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:20,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,370 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:20,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@581765ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:20,372 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:20,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:20,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:20,375 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T22:59:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,375 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
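[editor's note] The ACL entries earlier in this run ("Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA" and the ZKPermissionWatcher cache updates) are the server-side footprint of a table-level grant, and the call stack above shows the snapshot path re-reading those permissions. With the AccessController coprocessor enabled, the corresponding client-side grant looks roughly like this sketch; the class name is illustrative, not from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantSketch {
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportFileSystemState"),
        "jenkins",              // grantee
        null, null,             // whole table: no family/qualifier restriction
        Permission.Action.READ, Permission.Action.WRITE,
        Permission.Action.EXEC, Permission.Action.CREATE,
        Permission.Action.ADMIN);  // i.e. RWXCA
  }
}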
2024-12-11T22:59:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3870fc4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:20,377 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:20,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:20,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:20,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb2a25f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:20,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:20,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,378 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:20,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@646c2f4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:20,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:20,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:20,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:20,382 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:20,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:59:20,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:20,385 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:20,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T22:59:20,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:20,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:20,387 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:20,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T22:59:20,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:59:20,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T22:59:20,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T22:59:20,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T22:59:20,392 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:59:20,393 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:59:20,396 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:59:20,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742040_1216 (size=170) 2024-12-11T22:59:20,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742040_1216 (size=170) 2024-12-11T22:59:20,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742040_1216 (size=170) 2024-12-11T22:59:20,415 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, 
snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:59:20,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4}] 2024-12-11T22:59:20,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,417 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T22:59:20,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-11T22:59:20,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:20,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 8354263e3cc4eb0ec0ad4ea34c4e06c4: 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for af0dc4d9b6d0a60003281d7a4a7fd8a4: 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:59:20,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:59:20,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742041_1217 (size=71) 2024-12-11T22:59:20,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742041_1217 (size=71) 2024-12-11T22:59:20,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742041_1217 (size=71) 2024-12-11T22:59:20,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 
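The entries above show the master running a client-requested FLUSH snapshot (emptySnaptb0-testExportFileSystemState) as a SnapshotProcedure with one SnapshotRegionProcedure per online region; because the regions are still empty, each manifest records no hfiles. A minimal client-side sketch of issuing such a request through the public Admin API, assuming a standard HBase client on the classpath (the configuration is a placeholder, not taken from this run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // placeholder config; the test's mini-cluster supplies its own
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching "type=FLUSH" in the procedure description:
      // memstores are flushed first, then region manifests reference the hfiles.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
```

The blocking Admin.snapshot call returns once the procedure reaches SUCCESS; the async admin used by this test submits the request and polls instead.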
2024-12-11T22:59:20,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-11T22:59:20,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-11T22:59:20,600 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,600 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:20,604 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 in 186 msec 2024-12-11T22:59:20,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742042_1218 (size=71) 2024-12-11T22:59:20,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742042_1218 (size=71) 2024-12-11T22:59:20,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742042_1218 (size=71) 2024-12-11T22:59:20,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 
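The recurring "Checking to see if procedure is done pid=101" entries are that polling. A sketch of an equivalent loop with the public API, assuming the snapshot request has already been submitted (for example with Admin.snapshotAsync):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class WaitForSnapshot {
  // Polls the master until the named snapshot completes, mirroring the repeated
  // "Checking to see if procedure is done pid=..." calls in the log above.
  static void waitForSnapshot(Admin admin, String name, TableName table) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(name, table, SnapshotType.FLUSH);
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(200); // poll interval is arbitrary here
    }
  }
}
```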
2024-12-11T22:59:20,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-11T22:59:20,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-11T22:59:20,611 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:20,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-11T22:59:20,613 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:59:20,614 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 in 197 msec 2024-12-11T22:59:20,614 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:59:20,616 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
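The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries indicate that the table's 'cf' family is MOB-enabled, so the shared mob region (8c2558c0b59bb3a4e2e578c2c8e8915d) is captured in addition to the online regions. A sketch of declaring such a table; the MOB threshold is illustrative, not a value taken from this test:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  static void create(Admin admin) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)       // cells above the threshold are stored as MOB files under mobdir
            .setMobThreshold(102400L)  // illustrative threshold (bytes)
            .build())
        .build();
    admin.createTable(td);
  }
}
```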
2024-12-11T22:59:20,616 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:59:20,616 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:20,617 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T22:59:20,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742043_1219 (size=63) 2024-12-11T22:59:20,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742043_1219 (size=63) 2024-12-11T22:59:20,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742043_1219 (size=63) 2024-12-11T22:59:20,639 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:59:20,639 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:20,642 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:20,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742044_1220 (size=653) 2024-12-11T22:59:20,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742044_1220 (size=653) 2024-12-11T22:59:20,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742044_1220 (size=653) 2024-12-11T22:59:20,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T22:59:21,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T22:59:21,076 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:59:21,086 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:59:21,087 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:21,088 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:59:21,088 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T22:59:21,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 700 msec 2024-12-11T22:59:21,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T22:59:21,526 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T22:59:21,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:59:21,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:59:21,540 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:21,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-11T22:59:21,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 
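With the empty snapshot finished, the test loads a few rows; the "writing data to region ... with WAL disabled" warnings correspond to puts issued with SKIP_WAL durability. A sketch of such a write, with row and value placeholders:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  static void write(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0")) // placeholder row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Produces the server-side warning seen above: the edit skips the WAL and can
      // be lost if the region server crashes before the memstore is flushed.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```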
2024-12-11T22:59:21,543 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:59:21,545 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:21,550 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:21,558 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T22:59:21,562 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T22:59:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957961562 (current time:1733957961562). 2024-12-11T22:59:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:59:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T22:59:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:59:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@213febd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:21,564 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:21,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:21,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:21,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c357f5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-11T22:59:21,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:21,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:21,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,566 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:21,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e863345, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:21,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:21,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:21,568 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35440, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:21,570 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
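The ClusterIdFetcher and ClientMetaService entries show a short-lived connection bootstrapping from the connection registry and fetching the hbase:meta location before it can look anything up. Client code reaches the same lookup path through RegionLocator; a sketch, assuming an already open Connection:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegion {
  static void locate(Connection conn) throws Exception {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Resolves which region server hosts the region containing this row,
      // going through the meta lookup logged above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}
```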
2024-12-11T22:59:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,570 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d5aa588, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:21,572 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:21,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:21,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:21,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b4ab2b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:21,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:21,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:21,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,574 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:21,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a88583a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:21,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:21,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:21,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:21,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:59:21,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:21,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:21,582 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
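Before accepting the request the master resolves hbase:acl and reads the table's permissions (the "jenkins: RWXCA" entry just below). The same entries can be read from client code with AccessControlClient; a sketch, assuming the AccessController coprocessor is enabled as it is in this test:

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTablePermissions {
  // Dumps the hbase:acl entries for the table, e.g. "jenkins: RWXCA" above.
  static void dump(Connection conn) throws Throwable {
    List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemState");
    for (UserPermission p : perms) {
      System.out.println(p);
    }
  }
}
```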
2024-12-11T22:59:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:21,583 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T22:59:21,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
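With the ACL check done, the master stores pid=104 for snaptb0-testExportFileSystemState. A sketch of requesting that snapshot with an explicit SnapshotDescription and listing the result; as the log shows, creation time, version and owner are filled in server-side when the client leaves them unset:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeDataSnapshot {
  static void snapshot(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshot(desc); // blocks until the SnapshotProcedure finishes
    for (SnapshotDescription s : admin.listSnapshots()) {
      System.out.println(s.getName() + " -> " + s.getTableNameAsString());
    }
  }
}
```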
2024-12-11T22:59:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T22:59:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T22:59:21,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T22:59:21,586 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:59:21,587 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:59:21,590 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:59:21,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742045_1221 (size=165) 2024-12-11T22:59:21,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742045_1221 (size=165) 2024-12-11T22:59:21,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742045_1221 (size=165) 2024-12-11T22:59:21,597 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:59:21,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4}] 2024-12-11T22:59:21,598 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:21,598 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:21,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T22:59:21,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-11T22:59:21,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-11T22:59:21,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:21,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:21,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing af0dc4d9b6d0a60003281d7a4a7fd8a4 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T22:59:21,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 8354263e3cc4eb0ec0ad4ea34c4e06c4 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T22:59:21,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 is 71, key is 0b97b96d955e783bd6aa85e1cf62f221/cf:q/1733957961535/Put/seqid=0 2024-12-11T22:59:21,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 is 71, key is 18c4743519bc7942b6a57d67d5e5931e/cf:q/1733957961538/Put/seqid=0 2024-12-11T22:59:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742046_1222 (size=8171) 2024-12-11T22:59:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742046_1222 (size=8171) 2024-12-11T22:59:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742046_1222 (size=8171) 2024-12-11T22:59:21,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:21,796 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:21,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/.tmp/cf/95eb1e9d48f845f6a5c1c846a414dc4e, store: [table=testtb-testExportFileSystemState family=cf region=af0dc4d9b6d0a60003281d7a4a7fd8a4] 2024-12-11T22:59:21,799 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/.tmp/cf/95eb1e9d48f845f6a5c1c846a414dc4e is 209, key is 1bf28206bf9f8e062ddafe3228c68193e/cf:q/1733957961538/Put/seqid=0 2024-12-11T22:59:21,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742047_1223 (size=5102) 2024-12-11T22:59:21,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742047_1223 (size=5102) 2024-12-11T22:59:21,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742047_1223 (size=5102) 2024-12-11T22:59:21,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:21,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:21,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/.tmp/cf/0a889bc026f145468378811daa2558d0, store: [table=testtb-testExportFileSystemState family=cf region=8354263e3cc4eb0ec0ad4ea34c4e06c4] 2024-12-11T22:59:21,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/.tmp/cf/0a889bc026f145468378811daa2558d0 is 209, key is 0b912834aae352bf7e6b841370800579b/cf:q/1733957961535/Put/seqid=0 2024-12-11T22:59:21,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742048_1224 (size=14997) 2024-12-11T22:59:21,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742048_1224 (size=14997) 2024-12-11T22:59:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742048_1224 (size=14997) 2024-12-11T22:59:21,819 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/.tmp/cf/95eb1e9d48f845f6a5c1c846a414dc4e 2024-12-11T22:59:21,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/.tmp/cf/95eb1e9d48f845f6a5c1c846a414dc4e as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e 2024-12-11T22:59:21,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e, entries=47, sequenceid=6, filesize=14.6 K 2024-12-11T22:59:21,834 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for af0dc4d9b6d0a60003281d7a4a7fd8a4 in 84ms, sequenceid=6, compaction requested=false 2024-12-11T22:59:21,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for af0dc4d9b6d0a60003281d7a4a7fd8a4: 2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. for snaptb0-testExportFileSystemState completed. 
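Because this is a FLUSH snapshot of a table that now holds data, each region first flushes its memstore: the DefaultMobStoreFlusher entries show the MOB cells landing under mobdir and the per-region hfiles being committed out of .tmp. The same flush can be forced explicitly; a sketch:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTable {
  static void flush(Admin admin) throws Exception {
    // Forces memstore contents into hfiles, the same per-region work the
    // FLUSH snapshot triggers before its manifests reference the files.
    admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
  }
}
```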
2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e] hfiles 2024-12-11T22:59:21,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e for snapshot=snaptb0-testExportFileSystemState 2024-12-11T22:59:21,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742049_1225 (size=5918) 2024-12-11T22:59:21,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742049_1225 (size=5918) 2024-12-11T22:59:21,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742049_1225 (size=5918) 2024-12-11T22:59:21,838 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/.tmp/cf/0a889bc026f145468378811daa2558d0 2024-12-11T22:59:21,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/.tmp/cf/0a889bc026f145468378811daa2558d0 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0 2024-12-11T22:59:21,850 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0, entries=3, sequenceid=6, filesize=5.8 K 2024-12-11T22:59:21,851 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 
8354263e3cc4eb0ec0ad4ea34c4e06c4 in 101ms, sequenceid=6, compaction requested=false 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 8354263e3cc4eb0ec0ad4ea34c4e06c4: 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. for snaptb0-testExportFileSystemState completed. 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0] hfiles 2024-12-11T22:59:21,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0 for snapshot=snaptb0-testExportFileSystemState 2024-12-11T22:59:21,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742050_1226 (size=110) 2024-12-11T22:59:21,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742050_1226 (size=110) 2024-12-11T22:59:21,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742050_1226 (size=110) 2024-12-11T22:59:21,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 
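After the manifests are consolidated and verified, the test exports snaptb0-testExportFileSystemState with the ExportSnapshot tool (the "HDFS export destination path" entries further down). A sketch of driving that tool programmatically; the destination URI is a placeholder, and the equivalent shell form is `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the hfiles it references to another
    // filesystem; the destination URI below is a placeholder.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/hbase-backup"
    });
    System.exit(rc);
  }
}
```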
2024-12-11T22:59:21,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-11T22:59:21,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-11T22:59:21,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:21,859 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:21,867 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4 in 264 msec 2024-12-11T22:59:21,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742051_1227 (size=110) 2024-12-11T22:59:21,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742051_1227 (size=110) 2024-12-11T22:59:21,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:21,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742051_1227 (size=110) 2024-12-11T22:59:21,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-11T22:59:21,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-11T22:59:21,877 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:21,877 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:21,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-11T22:59:21,879 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:59:21,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4 in 281 msec 2024-12-11T22:59:21,880 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ 
ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:59:21,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T22:59:21,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:59:21,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:21,882 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4] hfiles 2024-12-11T22:59:21,882 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:21,882 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:21,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742052_1228 (size=294) 2024-12-11T22:59:21,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742052_1228 (size=294) 2024-12-11T22:59:21,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742052_1228 (size=294) 2024-12-11T22:59:21,897 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:59:21,897 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-11T22:59:21,898 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T22:59:21,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to 
see if procedure is done pid=104 2024-12-11T22:59:21,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742053_1229 (size=963) 2024-12-11T22:59:21,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742053_1229 (size=963) 2024-12-11T22:59:21,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742053_1229 (size=963) 2024-12-11T22:59:21,921 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:59:21,927 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:59:21,927 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T22:59:21,929 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:59:21,929 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T22:59:21,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 345 msec 2024-12-11T22:59:22,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T22:59:22,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T22:59:22,215 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215 2024-12-11T22:59:22,215 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215, 
rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:22,251 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:22,251 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T22:59:22,252 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T22:59:22,257 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T22:59:22,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742054_1230 (size=165) 2024-12-11T22:59:22,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742054_1230 (size=165) 2024-12-11T22:59:22,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742054_1230 (size=165) 2024-12-11T22:59:22,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742055_1231 (size=963) 2024-12-11T22:59:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742055_1231 (size=963) 2024-12-11T22:59:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742055_1231 (size=963) 2024-12-11T22:59:22,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:22,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:22,294 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,110 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1733957780200_0003_000001 (auth:SIMPLE) from 127.0.0.1:48896 2024-12-11T22:59:23,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000001/launch_container.sh] 2024-12-11T22:59:23,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000001/container_tokens] 2024-12-11T22:59:23,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0003/container_1733957780200_0003_01_000001/sysfs] 2024-12-11T22:59:23,231 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-1745026209710056917.jar 2024-12-11T22:59:23,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,232 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-16364821890944097101.jar 2024-12-11T22:59:23,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:23,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T22:59:23,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T22:59:23,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T22:59:23,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T22:59:23,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T22:59:23,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T22:59:23,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:23,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:23,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:23,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:23,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:23,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:23,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742056_1232 (size=131440) 2024-12-11T22:59:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742056_1232 (size=131440) 2024-12-11T22:59:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742056_1232 (size=131440) 2024-12-11T22:59:23,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T22:59:23,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36751 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T22:59:23,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T22:59:23,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742058_1234 (size=1323991) 2024-12-11T22:59:23,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742058_1234 (size=1323991) 2024-12-11T22:59:23,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742058_1234 (size=1323991) 2024-12-11T22:59:23,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742059_1235 (size=443172) 2024-12-11T22:59:23,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742059_1235 (size=443172) 2024-12-11T22:59:23,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742059_1235 (size=443172) 2024-12-11T22:59:23,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742060_1236 (size=903541) 2024-12-11T22:59:23,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742060_1236 (size=903541) 2024-12-11T22:59:23,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742060_1236 (size=903541) 2024-12-11T22:59:23,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742061_1237 (size=8360360) 2024-12-11T22:59:23,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742061_1237 (size=8360360) 2024-12-11T22:59:23,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742061_1237 (size=8360360) 2024-12-11T22:59:23,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T22:59:23,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T22:59:23,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T22:59:23,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742063_1239 (size=6426222) 2024-12-11T22:59:23,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742063_1239 (size=6426222) 2024-12-11T22:59:23,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742063_1239 (size=6426222) 2024-12-11T22:59:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742064_1240 (size=77835) 2024-12-11T22:59:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742064_1240 (size=77835) 2024-12-11T22:59:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742064_1240 (size=77835) 2024-12-11T22:59:23,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742065_1241 (size=30949) 2024-12-11T22:59:23,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742065_1241 (size=30949) 2024-12-11T22:59:23,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742065_1241 (size=30949) 2024-12-11T22:59:23,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742066_1242 (size=1597213) 2024-12-11T22:59:23,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742066_1242 (size=1597213) 2024-12-11T22:59:23,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742066_1242 (size=1597213) 2024-12-11T22:59:23,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T22:59:23,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T22:59:23,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T22:59:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742068_1244 (size=232957) 2024-12-11T22:59:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742068_1244 (size=232957) 2024-12-11T22:59:23,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742068_1244 (size=232957) 2024-12-11T22:59:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742069_1245 (size=127628) 2024-12-11T22:59:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742069_1245 (size=127628) 2024-12-11T22:59:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742069_1245 (size=127628) 2024-12-11T22:59:23,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742070_1246 (size=20406) 2024-12-11T22:59:23,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742070_1246 (size=20406) 2024-12-11T22:59:23,612 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742070_1246 (size=20406) 2024-12-11T22:59:23,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742071_1247 (size=5175431) 2024-12-11T22:59:23,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742071_1247 (size=5175431) 2024-12-11T22:59:23,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742071_1247 (size=5175431) 2024-12-11T22:59:23,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742072_1248 (size=217634) 2024-12-11T22:59:23,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742072_1248 (size=217634) 2024-12-11T22:59:23,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742072_1248 (size=217634) 2024-12-11T22:59:23,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T22:59:23,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T22:59:23,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T22:59:23,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742074_1250 (size=322274) 2024-12-11T22:59:23,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742074_1250 (size=322274) 2024-12-11T22:59:23,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742074_1250 (size=322274) 2024-12-11T22:59:23,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742075_1251 (size=503880) 2024-12-11T22:59:23,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742075_1251 (size=503880) 2024-12-11T22:59:23,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742075_1251 (size=503880) 2024-12-11T22:59:23,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742076_1252 (size=29229) 2024-12-11T22:59:23,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742076_1252 (size=29229) 2024-12-11T22:59:23,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742076_1252 (size=29229) 2024-12-11T22:59:23,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742077_1253 (size=24096) 2024-12-11T22:59:23,710 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742077_1253 (size=24096) 2024-12-11T22:59:23,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742077_1253 (size=24096) 2024-12-11T22:59:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742078_1254 (size=111872) 2024-12-11T22:59:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742078_1254 (size=111872) 2024-12-11T22:59:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742078_1254 (size=111872) 2024-12-11T22:59:23,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742079_1255 (size=45609) 2024-12-11T22:59:23,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742079_1255 (size=45609) 2024-12-11T22:59:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742079_1255 (size=45609) 2024-12-11T22:59:23,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742080_1256 (size=136454) 2024-12-11T22:59:23,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742080_1256 (size=136454) 2024-12-11T22:59:23,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742080_1256 (size=136454) 2024-12-11T22:59:23,740 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
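[Editor's note] The records above show the test resolving dependency jars through TableMapReduceUtil and staging them in HDFS (the long run of addStoredBlock reports), after which ExportSnapshot computes its file splits and submits the MapReduce copy job. As a hedged illustration only, not the test's own code: ExportSnapshot is a Hadoop Tool and is normally driven through ToolRunner with the -snapshot and -copy-to options documented in the HBase reference guide. The snapshot name and destination path below are taken from the log; the class and variable names of the sketch itself are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Cluster configuration; in this test run it would point at the in-process
    // MiniDFSCluster / MiniMRCluster rather than a real cluster.
    Configuration conf = HBaseConfiguration.create();

    // Snapshot name and export root taken from the log above; ExportSnapshot
    // recreates .hbase-snapshot/<name> plus the referenced hfiles under -copy-to.
    String[] exportArgs = {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215"
    };

    // ExportSnapshot implements Tool, so ToolRunner handles generic options
    // and launches the MapReduce job that copies the snapshot files.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(exitCode);
  }
}
```

The splits logged just below (split=0 size=14.6 K, and so on) are the per-mapper groups of hfiles that this job copies; the later addStoredBlock entries are the exported files landing on the three datanodes.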
2024-12-11T22:59:23,743 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-11T22:59:23,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.6 K 2024-12-11T22:59:23,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T22:59:23,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-11T22:59:23,745 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T22:59:23,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742081_1257 (size=1035) 2024-12-11T22:59:23,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742081_1257 (size=1035) 2024-12-11T22:59:23,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742081_1257 (size=1035) 2024-12-11T22:59:23,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742082_1258 (size=35) 2024-12-11T22:59:23,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742082_1258 (size=35) 2024-12-11T22:59:23,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742082_1258 (size=35) 2024-12-11T22:59:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742083_1259 (size=304004) 2024-12-11T22:59:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742083_1259 (size=304004) 2024-12-11T22:59:23,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742083_1259 (size=304004) 2024-12-11T22:59:23,870 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:59:23,870 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T22:59:23,979 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:52164 2024-12-11T22:59:24,570 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:59:29,997 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:45672 2024-12-11T22:59:30,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742084_1260 (size=349702) 2024-12-11T22:59:30,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742084_1260 (size=349702) 2024-12-11T22:59:30,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742084_1260 (size=349702) 2024-12-11T22:59:32,205 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:40220 2024-12-11T22:59:32,205 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:59362 2024-12-11T22:59:33,097 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:40224 2024-12-11T22:59:33,098 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:59364 2024-12-11T22:59:35,103 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T22:59:36,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742085_1261 (size=8171) 2024-12-11T22:59:36,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742085_1261 (size=8171) 2024-12-11T22:59:36,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742085_1261 (size=8171) 2024-12-11T22:59:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742087_1263 (size=14997) 2024-12-11T22:59:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742087_1263 (size=14997) 2024-12-11T22:59:37,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742087_1263 (size=14997) 2024-12-11T22:59:37,892 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000002/launch_container.sh] 2024-12-11T22:59:37,892 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000002/container_tokens] 2024-12-11T22:59:37,892 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000002/sysfs] 2024-12-11T22:59:38,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742088_1264 (size=5102) 2024-12-11T22:59:38,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742088_1264 (size=5102) 2024-12-11T22:59:38,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742088_1264 (size=5102) 2024-12-11T22:59:38,618 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000005/launch_container.sh] 2024-12-11T22:59:38,618 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000005/container_tokens] 2024-12-11T22:59:38,618 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000005/sysfs] 2024-12-11T22:59:38,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742089_1265 (size=5918) 2024-12-11T22:59:38,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742089_1265 (size=5918) 2024-12-11T22:59:38,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742089_1265 (size=5918) 2024-12-11T22:59:38,684 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T22:59:38,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742086_1262 (size=31746) 2024-12-11T22:59:38,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742086_1262 (size=31746) 2024-12-11T22:59:38,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742086_1262 (size=31746) 2024-12-11T22:59:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742090_1266 (size=466) 2024-12-11T22:59:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742090_1266 (size=466) 2024-12-11T22:59:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742090_1266 (size=466) 2024-12-11T22:59:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742091_1267 (size=31746) 2024-12-11T22:59:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742091_1267 (size=31746) 2024-12-11T22:59:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742091_1267 (size=31746) 2024-12-11T22:59:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742092_1268 (size=349702) 2024-12-11T22:59:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742092_1268 (size=349702) 2024-12-11T22:59:38,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742092_1268 (size=349702) 2024-12-11T22:59:38,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:55904 2024-12-11T22:59:38,830 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:35328 2024-12-11T22:59:38,842 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000004/launch_container.sh] 2024-12-11T22:59:38,842 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000004/container_tokens] 2024-12-11T22:59:38,842 WARN 
[ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000004/sysfs] 2024-12-11T22:59:40,040 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T22:59:40,040 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T22:59:40,047 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-11T22:59:40,047 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T22:59:40,047 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T22:59:40,047 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T22:59:40,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T22:59:40,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T22:59:40,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T22:59:40,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T22:59:40,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733957962215/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T22:59:40,053 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-11T22:59:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T22:59:40,056 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957980056"}]},"ts":"1733957980056"} 2024-12-11T22:59:40,058 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T22:59:40,058 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-11T22:59:40,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-11T22:59:40,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, UNASSIGN}] 2024-12-11T22:59:40,060 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, UNASSIGN 2024-12-11T22:59:40,060 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, UNASSIGN 2024-12-11T22:59:40,061 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=af0dc4d9b6d0a60003281d7a4a7fd8a4, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:40,061 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=8354263e3cc4eb0ec0ad4ea34c4e06c4, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T22:59:40,061 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=b762025f20c5,33867,1733957770938, table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
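[Editor's note] With the export verified, the remaining records are the test's teardown: the master stores DisableTableProcedure (pid=107), schedules CloseTableRegionsProcedure and the two TransitRegionStateProcedure children, the region servers close both regions, and DeleteTableProcedure (pid=113) then archives the region directories. A minimal client-side sketch of that cleanup using the standard Admin API is shown below; the table and snapshot names come from the log, while the class name, connection handling, and ordering are illustrative assumptions rather than the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Mirrors DisableTableProcedure (pid=107): regions are unassigned and the
      // table state moves DISABLING -> DISABLED in hbase:meta.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // Mirrors DeleteTableProcedure (pid=113): region directories are moved
      // under /archive before the table is removed from meta and the ACLs dropped.
      admin.deleteTable(table);

      // Drop the source snapshot once the exported copy has been verified.
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}
```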
2024-12-11T22:59:40,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, UNASSIGN because future has completed 2024-12-11T22:59:40,063 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:59:40,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:59:40,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, UNASSIGN because future has completed 2024-12-11T22:59:40,063 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T22:59:40,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731}] 2024-12-11T22:59:40,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T22:59:40,216 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:40,216 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:40,217 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing af0dc4d9b6d0a60003281d7a4a7fd8a4, disabling compactions & flushes 2024-12-11T22:59:40,217 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:40,217 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:40,217 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:40,217 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 
after waiting 0 ms 2024-12-11T22:59:40,217 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:40,217 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T22:59:40,218 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 8354263e3cc4eb0ec0ad4ea34c4e06c4, disabling compactions & flushes 2024-12-11T22:59:40,218 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:40,218 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:40,218 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. after waiting 0 ms 2024-12-11T22:59:40,218 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:40,228 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4. 
2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 8354263e3cc4eb0ec0ad4ea34c4e06c4: Waiting for close lock at 1733957980218Running coprocessor pre-close hooks at 1733957980218Disabling compacts and flushes for region at 1733957980218Disabling writes for close at 1733957980218Writing region close event to WAL at 1733957980220 (+2 ms)Running coprocessor post-close hooks at 1733957980228 (+8 ms)Closed at 1733957980228 2024-12-11T22:59:40,228 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4. 2024-12-11T22:59:40,228 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for af0dc4d9b6d0a60003281d7a4a7fd8a4: Waiting for close lock at 1733957980217Running coprocessor pre-close hooks at 1733957980217Disabling compacts and flushes for region at 1733957980217Disabling writes for close at 1733957980217Writing region close event to WAL at 1733957980220 (+3 ms)Running coprocessor post-close hooks at 1733957980228 (+8 ms)Closed at 1733957980228 2024-12-11T22:59:40,230 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:40,230 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=8354263e3cc4eb0ec0ad4ea34c4e06c4, regionState=CLOSED 2024-12-11T22:59:40,231 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:40,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=af0dc4d9b6d0a60003281d7a4a7fd8a4, regionState=CLOSED 2024-12-11T22:59:40,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T22:59:40,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:59:40,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-11T22:59:40,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 8354263e3cc4eb0ec0ad4ea34c4e06c4, server=b762025f20c5,37563,1733957770731 in 170 msec 2024-12-11T22:59:40,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-11T22:59:40,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, 
hasLock=false; CloseRegionProcedure af0dc4d9b6d0a60003281d7a4a7fd8a4, server=b762025f20c5,33867,1733957770938 in 170 msec 2024-12-11T22:59:40,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8354263e3cc4eb0ec0ad4ea34c4e06c4, UNASSIGN in 175 msec 2024-12-11T22:59:40,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-11T22:59:40,236 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=af0dc4d9b6d0a60003281d7a4a7fd8a4, UNASSIGN in 176 msec 2024-12-11T22:59:40,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-11T22:59:40,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 179 msec 2024-12-11T22:59:40,239 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957980239"}]},"ts":"1733957980239"} 2024-12-11T22:59:40,241 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T22:59:40,241 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-11T22:59:40,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 187 msec 2024-12-11T22:59:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T22:59:40,375 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T22:59:40,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-11T22:59:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,377 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-11T22:59:40,378 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,382 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in 
hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-11T22:59:40,382 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:40,382 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:40,384 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/recovered.edits] 2024-12-11T22:59:40,385 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/recovered.edits] 2024-12-11T22:59:40,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/cf/95eb1e9d48f845f6a5c1c846a414dc4e 2024-12-11T22:59:40,434 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/cf/0a889bc026f145468378811daa2558d0 2024-12-11T22:59:40,437 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4/recovered.edits/9.seqid 2024-12-11T22:59:40,437 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4/recovered.edits/9.seqid 2024-12-11T22:59:40,437 DEBUG [HFileArchiver-7 {}] 
backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:40,437 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemState/af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:40,437 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-11T22:59:40,438 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-11T22:59:40,439 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-11T22:59:40,659 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241211b22c7a0011424aba8a33280c1d93d244_af0dc4d9b6d0a60003281d7a4a7fd8a4 2024-12-11T22:59:40,659 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024121117c4ba52dcbf454cbc1e8a4bbed1f4af_8354263e3cc4eb0ec0ad4ea34c4e06c4 2024-12-11T22:59:40,661 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-11T22:59:40,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 
2024-12-11T22:59:40,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T22:59:40,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T22:59:40,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T22:59:40,799 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T22:59:40,802 INFO [AsyncFSWAL-0-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583 {}] wal.AbstractFSWAL(1368): Slow sync cost: 139 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36751,DS-181d4275-97db-424d-8b8b-c39260b79c8d,DISK], DatanodeInfoWithStorage[127.0.0.1:41607,DS-03ef22e9-098d-4974-baec-426a6b14708f,DISK]] 2024-12-11T22:59:40,802 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,806 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-11T22:59:40,809 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-11T22:59:40,811 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,811 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-11T22:59:40,811 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957980811"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:40,811 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733957980811"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:40,814 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T22:59:40,814 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8354263e3cc4eb0ec0ad4ea34c4e06c4, NAME => 'testtb-testExportFileSystemState,,1733957959719.8354263e3cc4eb0ec0ad4ea34c4e06c4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => af0dc4d9b6d0a60003281d7a4a7fd8a4, NAME => 'testtb-testExportFileSystemState,1,1733957959719.af0dc4d9b6d0a60003281d7a4a7fd8a4.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T22:59:40,814 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-11T22:59:40,814 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733957980814"}]},"ts":"9223372036854775807"} 2024-12-11T22:59:40,816 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-11T22:59:40,816 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T22:59:40,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 440 msec 2024-12-11T22:59:40,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:40,896 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T22:59:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:40,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:40,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-11T22:59:40,897 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-11T22:59:40,897 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T22:59:40,904 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T22:59:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-11T22:59:40,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T22:59:40,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-11T22:59:40,929 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=798 (was 797) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43611 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4010 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1326230229_1 at /127.0.0.1:46620 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:44580 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 124185) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1326230229_1 at /127.0.0.1:44566 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:53600 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:43611 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=787 (was 795), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=698 (was 710), ProcessCount=15 (was 15), AvailableMemoryMB=3862 (was 3984) 2024-12-11T22:59:40,930 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T22:59:40,944 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=798, OpenFileDescriptor=787, MaxFileDescriptor=1048576, SystemLoadAverage=698, ProcessCount=15, AvailableMemoryMB=3861 2024-12-11T22:59:40,944 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T22:59:40,945 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:59:40,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-11T22:59:40,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:59:40,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-11T22:59:40,948 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:59:40,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T22:59:40,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742093_1269 (size=440) 2024-12-11T22:59:40,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742093_1269 (size=440) 2024-12-11T22:59:40,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742093_1269 (size=440) 2024-12-11T22:59:40,957 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 
3969e48306fccb73cb0670db9b3ca4a5, NAME => 'testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:40,957 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0755d5b54c8766db1089850dbb01c1c5, NAME => 'testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742094_1270 (size=65) 2024-12-11T22:59:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742094_1270 (size=65) 2024-12-11T22:59:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742094_1270 (size=65) 2024-12-11T22:59:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742095_1271 (size=65) 2024-12-11T22:59:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742095_1271 (size=65) 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 0755d5b54c8766db1089850dbb01c1c5, disabling compactions & flushes 2024-12-11T22:59:41,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742095_1271 (size=65) 2024-12-11T22:59:41,046 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 
2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. after waiting 0 ms 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,046 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0755d5b54c8766db1089850dbb01c1c5: Waiting for close lock at 1733957981046Disabling compacts and flushes for region at 1733957981046Disabling writes for close at 1733957981046Writing region close event to WAL at 1733957981046Closed at 1733957981046 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 3969e48306fccb73cb0670db9b3ca4a5, disabling compactions & flushes 2024-12-11T22:59:41,046 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. after waiting 0 ms 2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,046 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 
2024-12-11T22:59:41,046 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3969e48306fccb73cb0670db9b3ca4a5: Waiting for close lock at 1733957981046Disabling compacts and flushes for region at 1733957981046Disabling writes for close at 1733957981046Writing region close event to WAL at 1733957981046Closed at 1733957981046 2024-12-11T22:59:41,047 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:59:41,048 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733957981048"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957981048"}]},"ts":"1733957981048"} 2024-12-11T22:59:41,048 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733957981048"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733957981048"}]},"ts":"1733957981048"} 2024-12-11T22:59:41,050 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T22:59:41,051 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:59:41,051 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957981051"}]},"ts":"1733957981051"} 2024-12-11T22:59:41,052 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-11T22:59:41,053 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:59:41,054 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:59:41,054 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:59:41,054 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:59:41,054 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:59:41,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, ASSIGN}] 2024-12-11T22:59:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T22:59:41,055 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, ASSIGN 2024-12-11T22:59:41,055 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, ASSIGN 2024-12-11T22:59:41,056 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T22:59:41,056 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T22:59:41,206 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T22:59:41,207 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=3969e48306fccb73cb0670db9b3ca4a5, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:59:41,207 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0755d5b54c8766db1089850dbb01c1c5, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:41,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, ASSIGN because future has completed 2024-12-11T22:59:41,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023}] 2024-12-11T22:59:41,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, ASSIGN because future has completed 2024-12-11T22:59:41,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938}] 2024-12-11T22:59:41,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T22:59:41,364 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,364 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,364 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 3969e48306fccb73cb0670db9b3ca4a5, NAME => 'testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T22:59:41,364 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 0755d5b54c8766db1089850dbb01c1c5, NAME => 'testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T22:59:41,364 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. service=AccessControlService 2024-12-11T22:59:41,364 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 
service=AccessControlService 2024-12-11T22:59:41,364 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:59:41,364 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,365 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,366 INFO [StoreOpener-3969e48306fccb73cb0670db9b3ca4a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,366 INFO [StoreOpener-0755d5b54c8766db1089850dbb01c1c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,367 INFO [StoreOpener-3969e48306fccb73cb0670db9b3ca4a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3969e48306fccb73cb0670db9b3ca4a5 columnFamilyName cf 2024-12-11T22:59:41,367 INFO [StoreOpener-0755d5b54c8766db1089850dbb01c1c5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0755d5b54c8766db1089850dbb01c1c5 columnFamilyName cf 2024-12-11T22:59:41,368 DEBUG [StoreOpener-3969e48306fccb73cb0670db9b3ca4a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:41,368 DEBUG [StoreOpener-0755d5b54c8766db1089850dbb01c1c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:41,369 INFO [StoreOpener-0755d5b54c8766db1089850dbb01c1c5-1 {}] regionserver.HStore(327): Store=0755d5b54c8766db1089850dbb01c1c5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:59:41,369 INFO [StoreOpener-3969e48306fccb73cb0670db9b3ca4a5-1 {}] regionserver.HStore(327): Store=3969e48306fccb73cb0670db9b3ca4a5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:59:41,369 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,369 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,370 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,370 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,370 DEBUG 
[RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,370 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,371 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,371 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,371 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,371 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,372 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,372 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,374 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:59:41,374 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 3969e48306fccb73cb0670db9b3ca4a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60960271, jitterRate=-0.09162117540836334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:59:41,375 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,375 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 3969e48306fccb73cb0670db9b3ca4a5: Running coprocessor pre-open hook at 1733957981365Writing region info on filesystem at 1733957981365Initializing all the Stores at 1733957981366 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957981366Cleaning up temporary data from old regions at 1733957981371 (+5 ms)Running coprocessor post-open hooks at 1733957981375 (+4 ms)Region opened successfully at 1733957981375 2024-12-11T22:59:41,376 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5., pid=117, masterSystemTime=1733957981360 2024-12-11T22:59:41,377 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,377 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,378 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=3969e48306fccb73cb0670db9b3ca4a5, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T22:59:41,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T22:59:41,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-11T22:59:41,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023 in 171 msec 2024-12-11T22:59:41,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, ASSIGN in 328 msec 2024-12-11T22:59:41,387 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:59:41,388 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 0755d5b54c8766db1089850dbb01c1c5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73478298, jitterRate=0.09491196274757385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:59:41,388 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,388 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 0755d5b54c8766db1089850dbb01c1c5: Running coprocessor pre-open hook at 1733957981365Writing region info on filesystem at 
1733957981365Initializing all the Stores at 1733957981366 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733957981366Cleaning up temporary data from old regions at 1733957981371 (+5 ms)Running coprocessor post-open hooks at 1733957981388 (+17 ms)Region opened successfully at 1733957981388 2024-12-11T22:59:41,389 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5., pid=118, masterSystemTime=1733957981361 2024-12-11T22:59:41,390 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,390 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,391 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0755d5b54c8766db1089850dbb01c1c5, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T22:59:41,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T22:59:41,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-11T22:59:41,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938 in 183 msec 2024-12-11T22:59:41,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-11T22:59:41,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, ASSIGN in 341 msec 2024-12-11T22:59:41,397 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:59:41,397 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733957981397"}]},"ts":"1733957981397"} 2024-12-11T22:59:41,398 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-11T22:59:41,399 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:59:41,399 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-11T22:59:41,401 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T22:59:41,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:41,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:41,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:41,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T22:59:41,466 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:41,466 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:41,466 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:41,466 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T22:59:41,468 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 521 msec 2024-12-11T22:59:41,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T22:59:41,576 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T22:59:41,576 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,579 DEBUG [Time-limited 
test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-11T22:59:41,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,579 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:59:41,581 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,586 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,591 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T22:59:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957981594 (current time:1733957981594). 2024-12-11T22:59:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:59:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T22:59:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:59:41,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2240b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:41,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:41,596 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:41,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:41,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:41,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f287df7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:41,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:41,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,597 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:41,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2692f020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:41,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:41,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:41,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:41,601 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:59:41,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:41,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,601 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:41,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cddc3b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:41,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:41,603 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:41,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:41,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:41,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56ac404e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:41,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:41,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,605 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:41,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8f2e28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:41,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:41,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:41,608 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:41,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:59:41,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:41,610 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:41,611 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:59:41,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:41,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:41,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T22:59:41,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T22:59:41,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T22:59:41,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T22:59:41,614 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:59:41,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T22:59:41,615 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:59:41,617 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:59:41,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742096_1272 (size=161) 2024-12-11T22:59:41,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742096_1272 (size=161) 2024-12-11T22:59:41,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742096_1272 (size=161) 2024-12-11T22:59:41,625 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:59:41,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5}] 2024-12-11T22:59:41,626 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,626 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,725 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T22:59:41,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-11T22:59:41,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-11T22:59:41,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 0755d5b54c8766db1089850dbb01c1c5: 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 3969e48306fccb73cb0670db9b3ca4a5: 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:59:41,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T22:59:41,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742097_1273 (size=68) 2024-12-11T22:59:41,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742097_1273 (size=68) 2024-12-11T22:59:41,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742097_1273 (size=68) 2024-12-11T22:59:41,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T22:59:41,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-11T22:59:41,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-11T22:59:41,795 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,795 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:41,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 in 172 msec 2024-12-11T22:59:41,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742098_1274 (size=68) 2024-12-11T22:59:41,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742098_1274 (size=68) 2024-12-11T22:59:41,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742098_1274 (size=68) 2024-12-11T22:59:41,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 
2024-12-11T22:59:41,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-11T22:59:41,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-11T22:59:41,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:41,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-11T22:59:41,829 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:59:41,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 in 202 msec 2024-12-11T22:59:41,830 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:59:41,836 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:59:41,836 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:59:41,836 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:41,837 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T22:59:41,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742099_1275 (size=60) 2024-12-11T22:59:41,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742099_1275 (size=60) 2024-12-11T22:59:41,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742099_1275 (size=60) 2024-12-11T22:59:41,850 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:59:41,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-11T22:59:41,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-11T22:59:41,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742100_1276 (size=641) 2024-12-11T22:59:41,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742100_1276 (size=641) 2024-12-11T22:59:41,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742100_1276 (size=641) 2024-12-11T22:59:41,877 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:59:41,883 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:59:41,883 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-11T22:59:41,885 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:59:41,885 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T22:59:41,886 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 273 msec 2024-12-11T22:59:41,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T22:59:41,935 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T22:59:41,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:59:41,948 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T22:59:41,950 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-11T22:59:41,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 
2024-12-11T22:59:41,953 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:59:41,955 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,960 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T22:59:41,970 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733957981971 (current time:1733957981971). 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238548b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:41,973 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:41,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:41,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:41,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d50709a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T22:59:41,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:41,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:41,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,975 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:41,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dfca17c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:41,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:41,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:41,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:41,980 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:59:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,980 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:59:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cfa726, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T22:59:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:59:41,992 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T22:59:41,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:59:41,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T22:59:41,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1accf964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T22:59:41,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:59:41,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:41,994 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:59:41,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3509921a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:59:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:59:41,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T22:59:41,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:41,998 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:42,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T22:59:42,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:59:42,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:59:42,003 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T22:59:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T22:59:42,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:59:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T22:59:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T22:59:42,006 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T22:59:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T22:59:42,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T22:59:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T22:59:42,009 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T22:59:42,011 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T22:59:42,014 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T22:59:42,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742101_1277 (size=156) 2024-12-11T22:59:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742101_1277 (size=156) 2024-12-11T22:59:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742101_1277 (size=156) 2024-12-11T22:59:42,028 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T22:59:42,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5}] 2024-12-11T22:59:42,029 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:42,029 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T22:59:42,173 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000003/launch_container.sh] 2024-12-11T22:59:42,174 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000003/container_tokens] 2024-12-11T22:59:42,174 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000003/sysfs] 2024-12-11T22:59:42,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-11T22:59:42,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T22:59:42,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-11T22:59:42,189 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 3969e48306fccb73cb0670db9b3ca4a5 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T22:59:42,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 
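The "Flushing ... 1/1 column families" entries above come from the FLUSH snapshot itself: each SnapshotRegionCallable flushes the region's memstore before its store files are referenced in the manifest. An explicit flush of the whole table can be requested in much the same way, as in the sketch below (illustrative only; the snapshot does not require it).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumed cluster configuration
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Forces a memstore flush for every region of the table, comparable to the
      // per-region flush the FLUSH snapshot performs before referencing store files.
      admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}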
2024-12-11T22:59:42,189 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 0755d5b54c8766db1089850dbb01c1c5 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T22:59:42,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 is 71, key is 1e2118c61cb20c02066a39cbaa22b10b/cf:q/1733957981948/Put/seqid=0 2024-12-11T22:59:42,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 is 71, key is 0f55f0420df389737671aae71025fd2e/cf:q/1733957981944/Put/seqid=0 2024-12-11T22:59:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742102_1278 (size=8241) 2024-12-11T22:59:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742102_1278 (size=8241) 2024-12-11T22:59:42,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742102_1278 (size=8241) 2024-12-11T22:59:42,244 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:42,268 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:42,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/.tmp/cf/f072194800b54b7c888d2c66aeda32b2, store: [table=testtb-testConsecutiveExports family=cf region=3969e48306fccb73cb0670db9b3ca4a5] 2024-12-11T22:59:42,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/.tmp/cf/f072194800b54b7c888d2c66aeda32b2 is 206, key is 
1f528167d24a61642fc7f3c4c4956c1c5/cf:q/1733957981948/Put/seqid=0 2024-12-11T22:59:42,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742103_1279 (size=5032) 2024-12-11T22:59:42,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742103_1279 (size=5032) 2024-12-11T22:59:42,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742103_1279 (size=5032) 2024-12-11T22:59:42,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:42,291 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:42,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/.tmp/cf/db20839cc3714d31aa04593a430b24b6, store: [table=testtb-testConsecutiveExports family=cf region=0755d5b54c8766db1089850dbb01c1c5] 2024-12-11T22:59:42,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/.tmp/cf/db20839cc3714d31aa04593a430b24b6 is 206, key is 0febe27c3d4bba3796c245efa53b4c693/cf:q/1733957981944/Put/seqid=0 2024-12-11T22:59:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742104_1280 (size=15055) 2024-12-11T22:59:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742104_1280 (size=15055) 2024-12-11T22:59:42,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742104_1280 (size=15055) 2024-12-11T22:59:42,307 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/.tmp/cf/f072194800b54b7c888d2c66aeda32b2 2024-12-11T22:59:42,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/.tmp/cf/f072194800b54b7c888d2c66aeda32b2 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2 2024-12-11T22:59:42,323 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2, entries=48, sequenceid=6, filesize=14.7 K 2024-12-11T22:59:42,324 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 3969e48306fccb73cb0670db9b3ca4a5 in 135ms, sequenceid=6, compaction requested=false 2024-12-11T22:59:42,324 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-11T22:59:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 3969e48306fccb73cb0670db9b3ca4a5: 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. for snaptb0-testConsecutiveExports completed. 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2] hfiles 2024-12-11T22:59:42,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2 for snapshot=snaptb0-testConsecutiveExports 2024-12-11T22:59:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742105_1281 (size=5700) 2024-12-11T22:59:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742105_1281 (size=5700) 2024-12-11T22:59:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742105_1281 (size=5700) 2024-12-11T22:59:42,342 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/.tmp/cf/db20839cc3714d31aa04593a430b24b6 2024-12-11T22:59:42,350 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/.tmp/cf/db20839cc3714d31aa04593a430b24b6 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6 2024-12-11T22:59:42,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6, entries=2, sequenceid=6, filesize=5.6 K 2024-12-11T22:59:42,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 0755d5b54c8766db1089850dbb01c1c5 in 173ms, sequenceid=6, compaction requested=false 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 
0755d5b54c8766db1089850dbb01c1c5: 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. for snaptb0-testConsecutiveExports completed. 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6] hfiles 2024-12-11T22:59:42,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6 for snapshot=snaptb0-testConsecutiveExports 2024-12-11T22:59:42,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742106_1282 (size=107) 2024-12-11T22:59:42,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742106_1282 (size=107) 2024-12-11T22:59:42,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742106_1282 (size=107) 2024-12-11T22:59:42,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 
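The mobdir paths and the HMobStore/DefaultMobStoreFlusher entries above indicate that family 'cf' of this table is MOB-enabled, so cell values above the configured threshold are flushed to separate mob files that the snapshot also has to reference. A table with such a family might be declared as in the sketch below; the table name and threshold are placeholders, not values taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumed cluster configuration
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example-mob-table"))     // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)      // values above the threshold land under mobdir/, as in the log
              .setMobThreshold(0L)      // assumed; a threshold of 0 routes every value to mob files
              .build())
          .build());
    }
  }
}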
2024-12-11T22:59:42,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-11T22:59:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-11T22:59:42,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:42,385 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:42,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5 in 359 msec 2024-12-11T22:59:42,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742107_1283 (size=107) 2024-12-11T22:59:42,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742107_1283 (size=107) 2024-12-11T22:59:42,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742107_1283 (size=107) 2024-12-11T22:59:42,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 
2024-12-11T22:59:42,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-11T22:59:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-11T22:59:42,414 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:42,414 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:42,417 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-12-11T22:59:42,417 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T22:59:42,417 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0755d5b54c8766db1089850dbb01c1c5 in 387 msec 2024-12-11T22:59:42,421 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T22:59:42,424 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T22:59:42,424 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T22:59:42,424 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:59:42,431 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5] hfiles 2024-12-11T22:59:42,431 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T22:59:42,431 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 2024-12-11T22:59:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742108_1284 (size=291) 2024-12-11T22:59:42,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742108_1284 (size=291) 2024-12-11T22:59:42,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742108_1284 (size=291) 2024-12-11T22:59:42,479 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T22:59:42,479 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-11T22:59:42,480 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T22:59:42,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742109_1285 (size=951) 2024-12-11T22:59:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742109_1285 (size=951) 2024-12-11T22:59:42,529 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742109_1285 (size=951) 2024-12-11T22:59:42,541 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T22:59:42,564 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T22:59:42,564 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T22:59:42,566 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T22:59:42,567 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T22:59:42,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 562 msec 2024-12-11T22:59:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T22:59:42,635 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T22:59:42,636 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635 2024-12-11T22:59:42,636 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T22:59:42,676 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 
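At this point the snapshot has completed and ExportSnapshot begins copying it to a local filesystem destination. A standalone export could be driven roughly as in the sketch below; the destination path is assumed, and the option names follow the HBase reference guide, so they should be checked against the version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent command-line form (assumed destination):
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
    Configuration conf = HBaseConfiguration.create();  // assumed cluster configuration
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // assumed path; the test uses a per-run temp dir
    });
    System.exit(rc);
  }
}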
2024-12-11T22:59:42,676 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@758cec5e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T22:59:42,679 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T22:59:42,684 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T22:59:42,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:42,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:42,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-16399137418437830980.jar 2024-12-11T22:59:43,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-3109227744997370382.jar 2024-12-11T22:59:43,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,656 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T22:59:43,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T22:59:43,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T22:59:43,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T22:59:43,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T22:59:43,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T22:59:43,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T22:59:43,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T22:59:43,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T22:59:43,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T22:59:43,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T22:59:43,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T22:59:43,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:43,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T22:59:43,733 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742110_1286 (size=131440) 2024-12-11T22:59:43,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742110_1286 (size=131440) 2024-12-11T22:59:43,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742110_1286 (size=131440) 2024-12-11T22:59:43,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T22:59:43,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T22:59:43,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T22:59:43,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T22:59:43,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T22:59:43,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T22:59:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742113_1289 (size=903541) 2024-12-11T22:59:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742113_1289 (size=903541) 2024-12-11T22:59:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742113_1289 (size=903541) 2024-12-11T22:59:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T22:59:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T22:59:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T22:59:43,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T22:59:43,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T22:59:43,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T22:59:43,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742116_1292 (size=77835) 2024-12-11T22:59:43,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742116_1292 (size=77835) 2024-12-11T22:59:43,894 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742116_1292 (size=77835) 2024-12-11T22:59:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742117_1293 (size=30949) 2024-12-11T22:59:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742117_1293 (size=30949) 2024-12-11T22:59:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742117_1293 (size=30949) 2024-12-11T22:59:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T22:59:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T22:59:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T22:59:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742119_1295 (size=4695811) 2024-12-11T22:59:43,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742119_1295 (size=4695811) 2024-12-11T22:59:43,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742119_1295 (size=4695811) 2024-12-11T22:59:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742120_1296 (size=232957) 2024-12-11T22:59:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742120_1296 (size=232957) 2024-12-11T22:59:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742120_1296 (size=232957) 2024-12-11T22:59:43,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742121_1297 (size=127628) 2024-12-11T22:59:43,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742121_1297 (size=127628) 2024-12-11T22:59:43,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742121_1297 (size=127628) 2024-12-11T22:59:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742122_1298 (size=20406) 2024-12-11T22:59:43,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742122_1298 (size=20406) 2024-12-11T22:59:43,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742122_1298 (size=20406) 2024-12-11T22:59:43,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742123_1299 (size=443172) 2024-12-11T22:59:43,991 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742123_1299 (size=443172) 2024-12-11T22:59:43,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742123_1299 (size=443172) 2024-12-11T22:59:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742124_1300 (size=5175431) 2024-12-11T22:59:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742124_1300 (size=5175431) 2024-12-11T22:59:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742124_1300 (size=5175431) 2024-12-11T22:59:44,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742125_1301 (size=217634) 2024-12-11T22:59:44,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742125_1301 (size=217634) 2024-12-11T22:59:44,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742125_1301 (size=217634) 2024-12-11T22:59:44,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742126_1302 (size=1832290) 2024-12-11T22:59:44,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742126_1302 (size=1832290) 2024-12-11T22:59:44,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742126_1302 (size=1832290) 2024-12-11T22:59:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742127_1303 (size=322274) 2024-12-11T22:59:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742127_1303 (size=322274) 2024-12-11T22:59:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742127_1303 (size=322274) 2024-12-11T22:59:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742128_1304 (size=503880) 2024-12-11T22:59:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742128_1304 (size=503880) 2024-12-11T22:59:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742128_1304 (size=503880) 2024-12-11T22:59:44,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742129_1305 (size=29229) 2024-12-11T22:59:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742129_1305 (size=29229) 2024-12-11T22:59:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742129_1305 (size=29229) 
2024-12-11T22:59:44,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742130_1306 (size=24096) 2024-12-11T22:59:44,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742130_1306 (size=24096) 2024-12-11T22:59:44,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742130_1306 (size=24096) 2024-12-11T22:59:44,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742131_1307 (size=111872) 2024-12-11T22:59:44,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742131_1307 (size=111872) 2024-12-11T22:59:44,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742131_1307 (size=111872) 2024-12-11T22:59:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742132_1308 (size=6426222) 2024-12-11T22:59:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742132_1308 (size=6426222) 2024-12-11T22:59:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742132_1308 (size=6426222) 2024-12-11T22:59:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742133_1309 (size=45609) 2024-12-11T22:59:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742133_1309 (size=45609) 2024-12-11T22:59:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742133_1309 (size=45609) 2024-12-11T22:59:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742134_1310 (size=136454) 2024-12-11T22:59:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742134_1310 (size=136454) 2024-12-11T22:59:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742134_1310 (size=136454) 2024-12-11T22:59:44,129 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
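The long run of "For class ..., using jar ..." entries, followed by the block reports above, corresponds to TableMapReduceUtil shipping the job's dependency jars to HDFS for the distributed cache; the "No job jar file set" warning appears because the test submits the job without a jar of its own. A minimal sketch of that dependency wiring, under an assumed job name, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // assumed cluster configuration
    Job job = Job.getInstance(conf, "snapshot-export-sketch");   // hypothetical job name
    // Adds the HBase, ZooKeeper, metrics, and shaded thirdparty jars to the job's
    // distributed cache, which is what the "using jar ..." entries above are recording.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
  }
}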
2024-12-11T22:59:44,132 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T22:59:44,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-12-11T22:59:44,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T22:59:44,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-11T22:59:44,146 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-11T22:59:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742135_1311 (size=1023) 2024-12-11T22:59:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742135_1311 (size=1023) 2024-12-11T22:59:44,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742135_1311 (size=1023) 2024-12-11T22:59:44,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742136_1312 (size=35) 2024-12-11T22:59:44,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742136_1312 (size=35) 2024-12-11T22:59:44,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742136_1312 (size=35) 2024-12-11T22:59:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742137_1313 (size=304043) 2024-12-11T22:59:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742137_1313 (size=304043) 2024-12-11T22:59:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742137_1313 (size=304043) 2024-12-11T22:59:44,903 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T22:59:44,903 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T22:59:44,910 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0004_000001 (auth:SIMPLE) from 127.0.0.1:35330 2024-12-11T22:59:44,919 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000001/launch_container.sh] 2024-12-11T22:59:44,919 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000001/container_tokens] 2024-12-11T22:59:44,919 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0004/container_1733957780200_0004_01_000001/sysfs] 2024-12-11T22:59:45,752 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:59:45,838 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:55906 2024-12-11T22:59:50,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T22:59:50,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-11T22:59:50,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T22:59:51,738 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:40572 2024-12-11T22:59:52,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742138_1314 (size=349741) 2024-12-11T22:59:52,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742138_1314 (size=349741) 2024-12-11T22:59:52,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742138_1314 (size=349741) 2024-12-11T22:59:53,945 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:41032 2024-12-11T22:59:53,945 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:36826 2024-12-11T22:59:54,831 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:41046 2024-12-11T22:59:54,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:36836 2024-12-11T22:59:55,622 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T22:59:56,907 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T22:59:58,188 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000002/launch_container.sh] 2024-12-11T22:59:58,188 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000002/container_tokens] 2024-12-11T22:59:58,188 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000002/sysfs] 2024-12-11T23:00:00,245 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000003/launch_container.sh] 2024-12-11T23:00:00,245 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000003/container_tokens] 2024-12-11T23:00:00,245 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000003/sysfs] 2024-12-11T23:00:00,491 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000004/launch_container.sh] 2024-12-11T23:00:00,491 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000004/container_tokens] 2024-12-11T23:00:00,491 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000004/sysfs] 2024-12-11T23:00:00,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742139_1315 (size=31811) 2024-12-11T23:00:00,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742139_1315 (size=31811) 2024-12-11T23:00:00,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742139_1315 (size=31811) 2024-12-11T23:00:00,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742140_1316 (size=463) 2024-12-11T23:00:00,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742140_1316 (size=463) 2024-12-11T23:00:00,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742140_1316 (size=463) 2024-12-11T23:00:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742141_1317 (size=31811) 2024-12-11T23:00:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742141_1317 (size=31811) 2024-12-11T23:00:00,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742141_1317 (size=31811) 2024-12-11T23:00:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742142_1318 (size=349741) 2024-12-11T23:00:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742142_1318 (size=349741) 2024-12-11T23:00:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742142_1318 (size=349741) 2024-12-11T23:00:01,018 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:48288 2024-12-11T23:00:01,025 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:54942 2024-12-11T23:00:01,032 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:54946 2024-12-11T23:00:01,037 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0005_01_000005 is : 143 2024-12-11T23:00:01,049 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000005/launch_container.sh] 2024-12-11T23:00:01,050 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000005/container_tokens] 2024-12-11T23:00:01,050 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000005/sysfs] 2024-12-11T23:00:02,387 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:00:02,387 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-11T23:00:02,390 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T23:00:02,390 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:00:02,391 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:00:02,391 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T23:00:02,393 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T23:00:02,393 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T23:00:02,393 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@758cec5e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T23:00:02,393 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T23:00:02,393 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T23:00:02,395 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:02,439 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:02,439 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@758cec5e, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T23:00:02,443 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:00:02,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T23:00:02,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:02,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:02,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,537 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-474375719664743148.jar 2024-12-11T23:00:03,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,538 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-8703843816539518996.jar 2024-12-11T23:00:03,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:03,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:00:03,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:00:03,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:00:03,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:00:03,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:00:03,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:00:03,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:03,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:03,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:03,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:03,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:03,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:03,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742143_1319 (size=131440) 2024-12-11T23:00:03,679 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742143_1319 (size=131440) 2024-12-11T23:00:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742143_1319 (size=131440) 2024-12-11T23:00:03,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T23:00:03,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T23:00:03,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T23:00:03,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T23:00:03,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T23:00:03,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T23:00:03,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742146_1322 (size=903541) 2024-12-11T23:00:03,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742146_1322 (size=903541) 2024-12-11T23:00:03,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742146_1322 (size=903541) 2024-12-11T23:00:03,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742147_1323 (size=8360360) 2024-12-11T23:00:03,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742147_1323 (size=8360360) 2024-12-11T23:00:03,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742147_1323 (size=8360360) 2024-12-11T23:00:03,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742148_1324 (size=6426222) 2024-12-11T23:00:03,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742148_1324 (size=6426222) 2024-12-11T23:00:03,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742148_1324 (size=6426222) 2024-12-11T23:00:03,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742149_1325 (size=1877034) 2024-12-11T23:00:03,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742149_1325 (size=1877034) 2024-12-11T23:00:03,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742149_1325 (size=1877034) 2024-12-11T23:00:03,801 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742150_1326 (size=77835) 2024-12-11T23:00:03,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742150_1326 (size=77835) 2024-12-11T23:00:03,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742150_1326 (size=77835) 2024-12-11T23:00:03,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742151_1327 (size=443172) 2024-12-11T23:00:03,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742151_1327 (size=443172) 2024-12-11T23:00:03,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742151_1327 (size=443172) 2024-12-11T23:00:03,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742152_1328 (size=30949) 2024-12-11T23:00:03,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742152_1328 (size=30949) 2024-12-11T23:00:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742152_1328 (size=30949) 2024-12-11T23:00:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742153_1329 (size=1597213) 2024-12-11T23:00:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742153_1329 (size=1597213) 2024-12-11T23:00:03,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742153_1329 (size=1597213) 2024-12-11T23:00:03,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742154_1330 (size=4695811) 2024-12-11T23:00:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742154_1330 (size=4695811) 2024-12-11T23:00:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742154_1330 (size=4695811) 2024-12-11T23:00:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742155_1331 (size=232957) 2024-12-11T23:00:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742155_1331 (size=232957) 2024-12-11T23:00:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742155_1331 (size=232957) 2024-12-11T23:00:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742156_1332 (size=127628) 2024-12-11T23:00:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742156_1332 (size=127628) 2024-12-11T23:00:03,855 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742156_1332 (size=127628) 2024-12-11T23:00:03,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742157_1333 (size=20406) 2024-12-11T23:00:03,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742157_1333 (size=20406) 2024-12-11T23:00:03,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742157_1333 (size=20406) 2024-12-11T23:00:03,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742158_1334 (size=5175431) 2024-12-11T23:00:03,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742158_1334 (size=5175431) 2024-12-11T23:00:03,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742158_1334 (size=5175431) 2024-12-11T23:00:03,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742159_1335 (size=217634) 2024-12-11T23:00:03,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742159_1335 (size=217634) 2024-12-11T23:00:03,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742159_1335 (size=217634) 2024-12-11T23:00:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742160_1336 (size=1832290) 2024-12-11T23:00:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742160_1336 (size=1832290) 2024-12-11T23:00:03,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742160_1336 (size=1832290) 2024-12-11T23:00:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742161_1337 (size=322274) 2024-12-11T23:00:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742161_1337 (size=322274) 2024-12-11T23:00:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742161_1337 (size=322274) 2024-12-11T23:00:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742162_1338 (size=503880) 2024-12-11T23:00:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742162_1338 (size=503880) 2024-12-11T23:00:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742162_1338 (size=503880) 2024-12-11T23:00:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742163_1339 (size=29229) 
2024-12-11T23:00:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742163_1339 (size=29229) 2024-12-11T23:00:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742163_1339 (size=29229) 2024-12-11T23:00:03,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742164_1340 (size=24096) 2024-12-11T23:00:03,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742164_1340 (size=24096) 2024-12-11T23:00:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742164_1340 (size=24096) 2024-12-11T23:00:03,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742165_1341 (size=111872) 2024-12-11T23:00:03,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742165_1341 (size=111872) 2024-12-11T23:00:03,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742165_1341 (size=111872) 2024-12-11T23:00:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742166_1342 (size=45609) 2024-12-11T23:00:04,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742166_1342 (size=45609) 2024-12-11T23:00:04,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742166_1342 (size=45609) 2024-12-11T23:00:04,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742167_1343 (size=136454) 2024-12-11T23:00:04,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742167_1343 (size=136454) 2024-12-11T23:00:04,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742167_1343 (size=136454) 2024-12-11T23:00:04,796 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-11T23:00:04,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T23:00:04,802 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-12-11T23:00:04,802 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T23:00:04,802 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-11T23:00:04,802 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-11T23:00:04,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742168_1344 (size=1023) 2024-12-11T23:00:04,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742168_1344 (size=1023) 2024-12-11T23:00:04,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742168_1344 (size=1023) 2024-12-11T23:00:04,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742169_1345 (size=35) 2024-12-11T23:00:04,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742169_1345 (size=35) 2024-12-11T23:00:04,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742169_1345 (size=35) 2024-12-11T23:00:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742170_1346 (size=304039) 2024-12-11T23:00:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742170_1346 (size=304039) 2024-12-11T23:00:04,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742170_1346 (size=304039) 2024-12-11T23:00:07,104 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:00:07,104 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T23:00:07,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0005_000001 (auth:SIMPLE) from 127.0.0.1:48298 2024-12-11T23:00:07,118 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000001/launch_container.sh] 2024-12-11T23:00:07,118 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000001/container_tokens] 2024-12-11T23:00:07,118 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0005/container_1733957780200_0005_01_000001/sysfs] 2024-12-11T23:00:08,050 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:43848 2024-12-11T23:00:08,685 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T23:00:14,056 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:56108 2024-12-11T23:00:14,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742171_1347 (size=349737) 2024-12-11T23:00:14,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742171_1347 (size=349737) 2024-12-11T23:00:14,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742171_1347 (size=349737) 2024-12-11T23:00:15,800 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0755d5b54c8766db1089850dbb01c1c5 changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:00:15,800 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3969e48306fccb73cb0670db9b3ca4a5 changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:00:16,268 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:44670 2024-12-11T23:00:16,268 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:43852 2024-12-11T23:00:17,137 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:43868 2024-12-11T23:00:17,144 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:44684 2024-12-11T23:00:20,110 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T23:00:20,894 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000003/launch_container.sh] 2024-12-11T23:00:20,894 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000003/container_tokens] 2024-12-11T23:00:20,894 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000003/sysfs] 2024-12-11T23:00:22,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000002/launch_container.sh] 2024-12-11T23:00:22,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000002/container_tokens] 2024-12-11T23:00:22,832 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000002/sysfs] 2024-12-11T23:00:23,106 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000005/launch_container.sh] 2024-12-11T23:00:23,107 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000005/container_tokens] 2024-12-11T23:00:23,107 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000005/sysfs] 2024-12-11T23:00:23,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742172_1348 (size=29739) 2024-12-11T23:00:23,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742172_1348 (size=29739) 2024-12-11T23:00:23,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742172_1348 (size=29739) 2024-12-11T23:00:23,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742173_1349 (size=463) 2024-12-11T23:00:23,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742173_1349 (size=463) 2024-12-11T23:00:23,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742173_1349 (size=463) 2024-12-11T23:00:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39843 is added to blk_1073742174_1350 (size=29739) 2024-12-11T23:00:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742174_1350 (size=29739) 2024-12-11T23:00:23,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742174_1350 (size=29739) 2024-12-11T23:00:23,351 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000004/launch_container.sh] 2024-12-11T23:00:23,351 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000004/container_tokens] 2024-12-11T23:00:23,351 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000004/sysfs] 2024-12-11T23:00:23,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742175_1351 (size=349737) 2024-12-11T23:00:23,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742175_1351 (size=349737) 2024-12-11T23:00:23,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742175_1351 (size=349737) 2024-12-11T23:00:23,377 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:51706 2024-12-11T23:00:25,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:00:25,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-11T23:00:25,074 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T23:00:25,074 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:00:25,074 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:00:25,075 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T23:00:25,075 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T23:00:25,075 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T23:00:25,075 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@758cec5e in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T23:00:25,075 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T23:00:25,076 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733957982635/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T23:00:25,095 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-11T23:00:25,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T23:00:25,098 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958025098"}]},"ts":"1733958025098"} 2024-12-11T23:00:25,100 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-11T23:00:25,100 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-11T23:00:25,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-11T23:00:25,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, UNASSIGN}] 2024-12-11T23:00:25,103 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, UNASSIGN 2024-12-11T23:00:25,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, UNASSIGN 2024-12-11T23:00:25,104 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=3969e48306fccb73cb0670db9b3ca4a5, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:25,104 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0755d5b54c8766db1089850dbb01c1c5, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:25,105 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=b762025f20c5,41635,1733957771023, table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-11T23:00:25,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, UNASSIGN because future has completed 2024-12-11T23:00:25,106 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:25,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:25,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, UNASSIGN because future has completed 2024-12-11T23:00:25,107 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:25,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:00:25,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T23:00:25,259 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T23:00:25,259 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 3969e48306fccb73cb0670db9b3ca4a5, disabling compactions & flushes 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 0755d5b54c8766db1089850dbb01c1c5, disabling compactions & flushes 2024-12-11T23:00:25,259 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T23:00:25,259 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 
2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. after waiting 0 ms 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. after waiting 0 ms 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 2024-12-11T23:00:25,259 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T23:00:25,372 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:25,373 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:25,373 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:25,373 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5. 
2024-12-11T23:00:25,373 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:25,373 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 3969e48306fccb73cb0670db9b3ca4a5: Waiting for close lock at 1733958025259Running coprocessor pre-close hooks at 1733958025259Disabling compacts and flushes for region at 1733958025259Disabling writes for close at 1733958025259Writing region close event to WAL at 1733958025260 (+1 ms)Running coprocessor post-close hooks at 1733958025373 (+113 ms)Closed at 1733958025373 2024-12-11T23:00:25,373 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5. 2024-12-11T23:00:25,373 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 0755d5b54c8766db1089850dbb01c1c5: Waiting for close lock at 1733958025259Running coprocessor pre-close hooks at 1733958025259Disabling compacts and flushes for region at 1733958025259Disabling writes for close at 1733958025259Writing region close event to WAL at 1733958025260 (+1 ms)Running coprocessor post-close hooks at 1733958025373 (+113 ms)Closed at 1733958025373 2024-12-11T23:00:25,375 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T23:00:25,375 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=3969e48306fccb73cb0670db9b3ca4a5, regionState=CLOSED 2024-12-11T23:00:25,375 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 0755d5b54c8766db1089850dbb01c1c5 2024-12-11T23:00:25,376 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0755d5b54c8766db1089850dbb01c1c5, regionState=CLOSED 2024-12-11T23:00:25,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:00:25,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:25,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-11T23:00:25,381 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 3969e48306fccb73cb0670db9b3ca4a5, server=b762025f20c5,41635,1733957771023 in 272 msec 2024-12-11T23:00:25,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-11T23:00:25,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 0755d5b54c8766db1089850dbb01c1c5, server=b762025f20c5,33867,1733957770938 in 273 msec 2024-12-11T23:00:25,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3969e48306fccb73cb0670db9b3ca4a5, UNASSIGN in 279 msec 2024-12-11T23:00:25,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-12-11T23:00:25,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0755d5b54c8766db1089850dbb01c1c5, UNASSIGN in 279 msec 2024-12-11T23:00:25,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-11T23:00:25,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 284 msec 2024-12-11T23:00:25,387 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958025387"}]},"ts":"1733958025387"} 2024-12-11T23:00:25,389 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-11T23:00:25,389 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-11T23:00:25,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 295 msec 2024-12-11T23:00:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T23:00:25,415 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T23:00:25,416 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-11T23:00:25,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,417 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-11T23:00:25,418 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,421 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry 
testtb-testConsecutiveExports 2024-12-11T23:00:25,422 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5 2024-12-11T23:00:25,425 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/recovered.edits] 2024-12-11T23:00:25,431 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T23:00:25,433 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/cf/db20839cc3714d31aa04593a430b24b6 2024-12-11T23:00:25,440 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/recovered.edits] 2024-12-11T23:00:25,444 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5/recovered.edits/9.seqid 2024-12-11T23:00:25,497 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/0755d5b54c8766db1089850dbb01c1c5 2024-12-11T23:00:25,498 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/cf/f072194800b54b7c888d2c66aeda32b2 2024-12-11T23:00:25,505 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/recovered.edits/9.seqid to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5/recovered.edits/9.seqid 2024-12-11T23:00:25,506 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testConsecutiveExports/3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T23:00:25,506 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-11T23:00:25,507 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-11T23:00:25,508 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-11T23:00:25,516 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121113e77ccd89c34f699fcad9468f66e6b4_3969e48306fccb73cb0670db9b3ca4a5 2024-12-11T23:00:25,529 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024121164410451ec2f4c13989355b82dd8a375_0755d5b54c8766db1089850dbb01c1c5 2024-12-11T23:00:25,530 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-11T23:00:25,613 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,616 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-11T23:00:25,619 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 
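[Editor's note, for orientation only] The records around this point cover the teardown the test performs after verification: disable the table, delete it (archiving its HFiles and MOB files as logged above), then drop both snapshots. A minimal Admin-API sketch of that sequence follows, assuming a plain client Connection; the class name and connection setup are illustrative, not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.tableExists(tn)) {
        admin.disableTable(tn);   // corresponds to DisableTableProcedure (pid=125 above)
        admin.deleteTable(tn);    // corresponds to DeleteTableProcedure (pid=131 above)
      }
      // Drop the snapshots named in the log once the table is gone.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}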
2024-12-11T23:00:25,622 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,622 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-11T23:00:25,622 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958025622"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:25,622 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958025622"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:25,625 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:00:25,625 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0755d5b54c8766db1089850dbb01c1c5, NAME => 'testtb-testConsecutiveExports,,1733957980945.0755d5b54c8766db1089850dbb01c1c5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3969e48306fccb73cb0670db9b3ca4a5, NAME => 'testtb-testConsecutiveExports,1,1733957980945.3969e48306fccb73cb0670db9b3ca4a5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:00:25,625 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-11T23:00:25,625 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958025625"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:25,627 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-11T23:00:25,628 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T23:00:25,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 212 msec 2024-12-11T23:00:25,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T23:00:25,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T23:00:25,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T23:00:25,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:25,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:25,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:25,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-11T23:00:25,695 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-11T23:00:25,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T23:00:25,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-11T23:00:25,700 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-11T23:00:25,701 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T23:00:25,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T23:00:25,713 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-11T23:00:25,715 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T23:00:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-11T23:00:25,747 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=805 (was 798) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36937 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:34072 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5501 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:56830 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 131458) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-204681041_1 at /127.0.0.1:56620 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-204681041_1 at /127.0.0.1:34040 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:36937 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=777 (was 787), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=766 (was 698) - SystemLoadAverage LEAK? -, ProcessCount=18 (was 15) - ProcessCount LEAK? -, AvailableMemoryMB=3269 (was 3861) 2024-12-11T23:00:25,748 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-11T23:00:25,769 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=805, OpenFileDescriptor=777, MaxFileDescriptor=1048576, SystemLoadAverage=766, ProcessCount=18, AvailableMemoryMB=3266 2024-12-11T23:00:25,769 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-11T23:00:25,771 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:00:25,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:25,774 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:00:25,775 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-11T23:00:25,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T23:00:25,776 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:00:25,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742176_1352 (size=458) 2024-12-11T23:00:25,797 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742176_1352 (size=458) 2024-12-11T23:00:25,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742176_1352 (size=458) 2024-12-11T23:00:25,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f587accbc369eb56e08a93392794e478, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:25,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ba62783214563f2306fda1446d349a11, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:25,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742178_1354 (size=83) 2024-12-11T23:00:25,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742178_1354 (size=83) 2024-12-11T23:00:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742178_1354 (size=83) 2024-12-11T23:00:25,816 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:25,816 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing f587accbc369eb56e08a93392794e478, disabling compactions & flushes 2024-12-11T23:00:25,816 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:25,816 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:25,816 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. after waiting 0 ms 2024-12-11T23:00:25,816 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:25,817 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:25,817 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for f587accbc369eb56e08a93392794e478: Waiting for close lock at 1733958025816Disabling compacts and flushes for region at 1733958025816Disabling writes for close at 1733958025816Writing region close event to WAL at 1733958025817 (+1 ms)Closed at 1733958025817 2024-12-11T23:00:25,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742177_1353 (size=83) 2024-12-11T23:00:25,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742177_1353 (size=83) 2024-12-11T23:00:25,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742177_1353 (size=83) 2024-12-11T23:00:25,839 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:25,840 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing ba62783214563f2306fda1446d349a11, disabling compactions & flushes 2024-12-11T23:00:25,840 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:25,840 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:25,840 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 
after waiting 0 ms 2024-12-11T23:00:25,840 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:25,840 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:25,840 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for ba62783214563f2306fda1446d349a11: Waiting for close lock at 1733958025840Disabling compacts and flushes for region at 1733958025840Disabling writes for close at 1733958025840Writing region close event to WAL at 1733958025840Closed at 1733958025840 2024-12-11T23:00:25,841 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:00:25,841 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733958025841"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958025841"}]},"ts":"1733958025841"} 2024-12-11T23:00:25,842 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733958025841"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958025841"}]},"ts":"1733958025841"} 2024-12-11T23:00:25,845 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
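The entries above trace CreateTableProcedure pid=132 through CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT and CREATE_TABLE_ADD_TO_META for a two-region table whose single column family 'cf' has IS_MOB => 'true' and MOB_THRESHOLD => '0'. A minimal sketch of how such a table could be created through the HBase 2.x+ client API follows; the class name and the assumption that a single split key of '1' was used (implied by the STARTKEY/ENDKEY pairs in the RegionOpenAndInit entries) are illustrative, not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          // Column family 'cf' with MOB enabled, MOB_THRESHOLD => '0' and VERSIONS => '1',
          // matching the descriptor printed by HMaster above.
          TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)
                  .setMaxVersions(1)
                  .build());
          // A single split key of '1' yields the two regions seen in the log:
          // ['', '1') and ['1', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(builder.build(), splitKeys);
        }
      }
    }
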
2024-12-11T23:00:25,846 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:00:25,847 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958025846"}]},"ts":"1733958025846"} 2024-12-11T23:00:25,854 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-11T23:00:25,854 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:00:25,856 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:00:25,856 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:00:25,856 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:00:25,856 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:00:25,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, ASSIGN}] 2024-12-11T23:00:25,861 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, ASSIGN 2024-12-11T23:00:25,863 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, ASSIGN 2024-12-11T23:00:25,864 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 
2024-12-11T23:00:25,865 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:00:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T23:00:26,014 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T23:00:26,015 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ba62783214563f2306fda1446d349a11, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:26,015 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=f587accbc369eb56e08a93392794e478, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:26,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, ASSIGN because future has completed 2024-12-11T23:00:26,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:00:26,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, ASSIGN because future has completed 2024-12-11T23:00:26,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T23:00:26,172 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:26,172 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 
2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => f587accbc369eb56e08a93392794e478, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => ba62783214563f2306fda1446d349a11, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. service=AccessControlService 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. service=AccessControlService 2024-12-11T23:00:26,173 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:26,173 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,174 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,174 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,175 INFO [StoreOpener-ba62783214563f2306fda1446d349a11-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,177 INFO [StoreOpener-ba62783214563f2306fda1446d349a11-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba62783214563f2306fda1446d349a11 columnFamilyName cf 2024-12-11T23:00:26,178 DEBUG [StoreOpener-ba62783214563f2306fda1446d349a11-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
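The StoreFileTrackerFactory entry above shows DefaultStoreFileTracker being instantiated for the 'cf' store, matching the table's METADATA entry 'hbase.store.file-tracker.impl' => 'DEFAULT'. A hypothetical sketch of setting that key explicitly on a descriptor, assuming a recent HBase release with store file tracking; the class name is illustrative, and 'FILE' mentioned in the comment is an alternative built-in tracker, not what this run used.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class StoreFileTrackerSketch {
      public static TableDescriptor withTracker(String impl) {
        // impl is "DEFAULT" in the log above; "FILE" is another built-in choice.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.store.file-tracker.impl", impl)
            .build();
      }
    }
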
2024-12-11T23:00:26,179 INFO [StoreOpener-ba62783214563f2306fda1446d349a11-1 {}] regionserver.HStore(327): Store=ba62783214563f2306fda1446d349a11/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:26,179 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,180 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,180 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,181 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,181 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,182 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,187 INFO [StoreOpener-f587accbc369eb56e08a93392794e478-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,188 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:26,188 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened ba62783214563f2306fda1446d349a11; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66014811, jitterRate=-0.01630266010761261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:26,188 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,189 INFO [StoreOpener-f587accbc369eb56e08a93392794e478-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f587accbc369eb56e08a93392794e478 columnFamilyName cf 2024-12-11T23:00:26,189 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for ba62783214563f2306fda1446d349a11: Running coprocessor pre-open hook at 1733958026174Writing region info on filesystem at 1733958026174Initializing all the Stores at 1733958026174Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958026174Cleaning up temporary data from old regions at 1733958026181 (+7 ms)Running coprocessor post-open hooks at 1733958026188 (+7 ms)Region opened successfully at 1733958026189 (+1 ms) 2024-12-11T23:00:26,190 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11., pid=135, masterSystemTime=1733958026169 2024-12-11T23:00:26,190 DEBUG [StoreOpener-f587accbc369eb56e08a93392794e478-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:26,191 INFO [StoreOpener-f587accbc369eb56e08a93392794e478-1 {}] regionserver.HStore(327): Store=f587accbc369eb56e08a93392794e478/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:26,191 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,192 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,192 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,192 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 
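At this point ba62783214563f2306fda1446d349a11 has finished its post-open deploy task, and f587accbc369eb56e08a93392794e478 follows in the entries below. Once both OpenRegionProcedures complete, the resulting assignment can be read back from the client side; a minimal, hypothetical sketch is shown here (the class and method names are illustrative, and conn is assumed to be an already-open Connection to this mini cluster).

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLocationSketch {
      public static void printRegionLocations(Connection conn) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // Each entry pairs a region (e.g. f587accbc369... or ba62783214...) with its RegionServer.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
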
2024-12-11T23:00:26,193 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:26,193 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,193 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,193 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ba62783214563f2306fda1446d349a11, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:26,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:00:26,195 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,197 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:26,198 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened f587accbc369eb56e08a93392794e478; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62716534, jitterRate=-0.06545081734657288}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:26,198 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,198 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for f587accbc369eb56e08a93392794e478: Running coprocessor pre-open hook at 1733958026174Writing region info on filesystem at 1733958026174Initializing all the Stores at 1733958026174Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958026174Cleaning up temporary data from old regions at 1733958026193 (+19 ms)Running coprocessor post-open hooks at 1733958026198 (+5 ms)Region opened successfully at 1733958026198 2024-12-11T23:00:26,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-11T23:00:26,198 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023 in 179 msec 2024-12-11T23:00:26,199 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478., pid=136, masterSystemTime=1733958026170 2024-12-11T23:00:26,200 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, ASSIGN in 342 msec 2024-12-11T23:00:26,200 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:26,200 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:26,201 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=f587accbc369eb56e08a93392794e478, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:26,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:26,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-11T23:00:26,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938 in 185 msec 2024-12-11T23:00:26,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-12-11T23:00:26,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, ASSIGN in 349 msec 2024-12-11T23:00:26,207 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:00:26,207 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958026207"}]},"ts":"1733958026207"} 2024-12-11T23:00:26,209 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-11T23:00:26,209 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:00:26,210 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-11T23:00:26,213 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T23:00:26,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:26,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:26,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:26,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:26,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:26,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:26,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:26,231 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:26,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 459 msec 2024-12-11T23:00:26,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T23:00:26,405 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T23:00:26,405 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at 
row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:26,408 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:26,410 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,416 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,422 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T23:00:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958026426 (current time:1733958026426). 
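The request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) corresponds to a FLUSH-type snapshot taken through the Admin API. A minimal sketch of the equivalent client call; the wrapper class is illustrative, and admin is assumed to be an open Admin for this cluster.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotSketch {
      // admin: an already-open org.apache.hadoop.hbase.client.Admin
      public static void takeEmptySnapshot(Admin admin) throws Exception {
        // Mirrors the logged request: a FLUSH-type snapshot of the freshly created table.
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
            SnapshotType.FLUSH);
      }
    }
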
2024-12-11T23:00:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:00:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T23:00:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23a6a217, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:26,428 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:26,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:26,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:26,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a3cc9a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:26,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:26,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,430 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:26,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@244c0c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:26,434 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:26,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:00:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,439 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T23:00:26,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1256d262, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:26,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:26,454 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:26,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:26,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:26,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54881a5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:26,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:26,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,457 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:26,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5612e27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:26,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:26,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53224, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:26,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,466 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,467 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:00:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,467 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:26,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T23:00:26,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:00:26,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T23:00:26,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T23:00:26,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T23:00:26,470 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:26,471 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:26,473 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:26,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742179_1355 (size=215) 2024-12-11T23:00:26,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742179_1355 (size=215) 2024-12-11T23:00:26,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742179_1355 (size=215) 2024-12-11T23:00:26,500 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:26,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11}] 2024-12-11T23:00:26,504 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,504 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T23:00:26,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-11T23:00:26,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:26,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for f587accbc369eb56e08a93392794e478: 2024-12-11T23:00:26,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:00:26,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for ba62783214563f2306fda1446d349a11: 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T23:00:26,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:26,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:00:26,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742180_1356 (size=86) 2024-12-11T23:00:26,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742180_1356 (size=86) 2024-12-11T23:00:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742180_1356 (size=86) 2024-12-11T23:00:26,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 
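Both SnapshotRegionProcedures have now stored their region-info manifests with empty hfile reference lists, as expected for a snapshot taken before any data was written. A hypothetical sketch of verifying afterwards that the snapshot was registered; the class name is illustrative, and admin is assumed to be an open Admin.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class SnapshotCheckSketch {
      // admin: an already-open org.apache.hadoop.hbase.client.Admin
      public static boolean snapshotExists(Admin admin, String name) throws Exception {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription sd : snapshots) {
          if (name.equals(sd.getName())) {
            // e.g. "emptySnaptb0-testExportFileSystemStateWithMergeRegion"
            return true;
          }
        }
        return false;
      }
    }
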
2024-12-11T23:00:26,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-11T23:00:26,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-11T23:00:26,679 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,679 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 in 180 msec 2024-12-11T23:00:26,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742181_1357 (size=86) 2024-12-11T23:00:26,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742181_1357 (size=86) 2024-12-11T23:00:26,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:26,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742181_1357 (size=86) 2024-12-11T23:00:26,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-11T23:00:26,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-11T23:00:26,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,687 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 2024-12-11T23:00:26,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-11T23:00:26,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 in 189 msec 2024-12-11T23:00:26,691 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:26,692 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:26,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T23:00:26,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:00:26,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:26,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T23:00:26,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742182_1358 (size=78) 2024-12-11T23:00:26,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742182_1358 (size=78) 2024-12-11T23:00:26,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742182_1358 (size=78) 2024-12-11T23:00:26,718 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:26,718 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,719 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742183_1359 (size=713) 2024-12-11T23:00:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742183_1359 (size=713) 2024-12-11T23:00:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742183_1359 (size=713) 2024-12-11T23:00:26,737 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:26,748 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:26,749 
DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,750 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:26,751 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T23:00:26,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 283 msec 2024-12-11T23:00:26,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T23:00:26,787 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T23:00:26,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:26,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:26,804 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:26,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 
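The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries above are produced when a client issues puts with durability SKIP_WAL. A minimal client-side sketch of such a write, assuming the table name from the log and a hypothetical row key, qualifier, and value:

// Illustrative sketch only, not part of the log. Row/qualifier/value are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                       // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);                          // skip the write-ahead log
      table.put(put);   // triggers the "with WAL disabled" warning; data is lost if the RS crashes before a flush
    }
  }
}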
2024-12-11T23:00:26,808 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:26,811 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,819 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,836 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T23:00:26,840 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T23:00:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958026841 (current time:1733958026841). 2024-12-11T23:00:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:00:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T23:00:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30d0aaa4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:26,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:26,846 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:26,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:26,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:26,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593b83e5, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:26,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:26,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,848 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:26,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@329c4a9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:26,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:26,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,868 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
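The snapshot request logged above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) corresponds to a client-side Admin snapshot call. A minimal sketch, assuming the HBase 2.x+ client API and the snapshot/table names from the log:

// Illustrative sketch only, not part of the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of an online table; the master then drives the
      // SnapshotProcedure state machine seen in this log (SNAPSHOT_PREPARE -> ... -> SNAPSHOT_COMPLETE_SNAPSHOT).
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH));
    }
  }
}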
2024-12-11T23:00:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,869 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:26,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19c266d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:26,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:26,883 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:26,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:26,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:26,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@129bf9b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:26,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:26,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,886 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50094, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:26,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695d844e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:26,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:26,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:26,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:26,904 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:26,907 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:00:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:26,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:26,908 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:26,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T23:00:26,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
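The hbase:acl lookup above ("Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]") is the master reading the table's permissions while writing them into the snapshot description. A hedged sketch of reading the same ACL entries from a client, assuming the AccessController coprocessor is enabled and the AccessControlClient utility is available:

// Illustrative sketch only, not part of the log.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadTableAclExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Returns the permissions stored in hbase:acl for tables matching the regex,
      // e.g. the "jenkins: RWXCA" entry seen in the log.
      List<UserPermission> perms = AccessControlClient.getUserPermissions(
          conn, "testtb-testExportFileSystemStateWithMergeRegion");
      perms.forEach(System.out::println);
    }
  }
}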
2024-12-11T23:00:26,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T23:00:26,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T23:00:26,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T23:00:26,912 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:26,918 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:26,933 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:26,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742184_1360 (size=210) 2024-12-11T23:00:26,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742184_1360 (size=210) 2024-12-11T23:00:26,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742184_1360 (size=210) 2024-12-11T23:00:26,961 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:26,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11}] 2024-12-11T23:00:26,962 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 2024-12-11T23:00:26,962 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 2024-12-11T23:00:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T23:00:27,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-11T23:00:27,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-11T23:00:27,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:27,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing f587accbc369eb56e08a93392794e478 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T23:00:27,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:27,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing ba62783214563f2306fda1446d349a11 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T23:00:27,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 is 71, key is 0142c07c7c2f21bdc6e8aa3df19a58c2/cf:q/1733958026796/Put/seqid=0 2024-12-11T23:00:27,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 is 71, key is 11e1e4dffebd00563534b17e8e8f5c75/cf:q/1733958026798/Put/seqid=0 2024-12-11T23:00:27,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742185_1361 (size=5242) 2024-12-11T23:00:27,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742185_1361 (size=5242) 2024-12-11T23:00:27,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742185_1361 (size=5242) 2024-12-11T23:00:27,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 2024-12-11T23:00:27,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/.tmp/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=f587accbc369eb56e08a93392794e478] 2024-12-11T23:00:27,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/.tmp/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e is 224, key is 095059c93350fccd37c293dcdc67b2383/cf:q/1733958026796/Put/seqid=0 2024-12-11T23:00:27,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742186_1362 (size=8031) 2024-12-11T23:00:27,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742186_1362 (size=8031) 2024-12-11T23:00:27,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742186_1362 (size=8031) 2024-12-11T23:00:27,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742187_1363 (size=6418) 2024-12-11T23:00:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742187_1363 (size=6418) 2024-12-11T23:00:27,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742187_1363 (size=6418) 2024-12-11T23:00:27,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 2024-12-11T23:00:27,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/.tmp/cf/3898cb1a5a004b5bb7001a86a926ce40, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=ba62783214563f2306fda1446d349a11] 2024-12-11T23:00:27,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/.tmp/cf/3898cb1a5a004b5bb7001a86a926ce40 is 224, key is 1ba9e0ebdb7059b66e95e69873038ea54/cf:q/1733958026798/Put/seqid=0 2024-12-11T23:00:27,219 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/.tmp/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e 2024-12-11T23:00:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T23:00:27,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/.tmp/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e 2024-12-11T23:00:27,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e, entries=5, sequenceid=6, filesize=6.3 K 2024-12-11T23:00:27,255 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for f587accbc369eb56e08a93392794e478 in 140ms, sequenceid=6, compaction requested=false 2024-12-11T23:00:27,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 
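The repeated "Checking to see if procedure is done pid=140" entries are the client polling the master until the snapshot procedure finishes. A sketch of the equivalent asynchronous request plus poll loop, assuming Admin#snapshotAsync and Admin#isSnapshotFinished as exposed by recent HBase client APIs:

// Illustrative sketch only, not part of the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    SnapshotDescription snapshot = new SnapshotDescription(
        "snaptb0-testExportFileSystemStateWithMergeRegion",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(snapshot);            // request returns immediately
      while (!admin.isSnapshotFinished(snapshot)) {
        Thread.sleep(200);                      // analogous to the "is procedure done" polling in the log
      }
    }
  }
}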
2024-12-11T23:00:27,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742188_1364 (size=15277) 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for f587accbc369eb56e08a93392794e478: 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T23:00:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742188_1364 (size=15277) 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e] hfiles 2024-12-11T23:00:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742188_1364 (size=15277) 2024-12-11T23:00:27,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/.tmp/cf/3898cb1a5a004b5bb7001a86a926ce40 2024-12-11T23:00:27,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/.tmp/cf/3898cb1a5a004b5bb7001a86a926ce40 as 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40 2024-12-11T23:00:27,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40, entries=45, sequenceid=6, filesize=14.9 K 2024-12-11T23:00:27,272 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for ba62783214563f2306fda1446d349a11 in 156ms, sequenceid=6, compaction requested=false 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for ba62783214563f2306fda1446d349a11: 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40] hfiles 2024-12-11T23:00:27,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742189_1365 (size=125) 2024-12-11T23:00:27,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742189_1365 (size=125) 2024-12-11T23:00:27,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742189_1365 (size=125) 2024-12-11T23:00:27,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 
2024-12-11T23:00:27,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-11T23:00:27,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-11T23:00:27,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region f587accbc369eb56e08a93392794e478 2024-12-11T23:00:27,289 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 2024-12-11T23:00:27,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f587accbc369eb56e08a93392794e478 in 329 msec 2024-12-11T23:00:27,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742190_1366 (size=125) 2024-12-11T23:00:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742190_1366 (size=125) 2024-12-11T23:00:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742190_1366 (size=125) 2024-12-11T23:00:27,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 
2024-12-11T23:00:27,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-11T23:00:27,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-11T23:00:27,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region ba62783214563f2306fda1446d349a11 2024-12-11T23:00:27,306 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 2024-12-11T23:00:27,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-11T23:00:27,308 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:27,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba62783214563f2306fda1446d349a11 in 346 msec 2024-12-11T23:00:27,309 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:27,312 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
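The mobdir flush paths, HMobStore/DefaultMobStoreFlusher entries, and the SNAPSHOT_SNAPSHOT_MOB_REGION step above indicate the 'cf' family of this table is MOB-enabled. A sketch of how such a family could be declared; the 100 KB threshold is hypothetical and not taken from the log:

// Illustrative sketch only, not part of the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Cells larger than the MOB threshold are written as separate MOB files under
    // /mobdir, which is why flushes route through HMobStore and the snapshot
    // procedure has a dedicated MOB-region step.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024L)   // hypothetical threshold in bytes
            .build())
        .build();
    System.out.println(desc);
  }
}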
2024-12-11T23:00:27,312 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:00:27,312 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,313 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478] hfiles 2024-12-11T23:00:27,313 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 2024-12-11T23:00:27,313 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 2024-12-11T23:00:27,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742191_1367 (size=309) 2024-12-11T23:00:27,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742191_1367 (size=309) 2024-12-11T23:00:27,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742191_1367 (size=309) 2024-12-11T23:00:27,342 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:27,343 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,343 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742192_1368 (size=1023) 2024-12-11T23:00:27,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39843 is added to blk_1073742192_1368 (size=1023) 2024-12-11T23:00:27,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742192_1368 (size=1023) 2024-12-11T23:00:27,385 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:27,392 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:27,393 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:27,394 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:27,394 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T23:00:27,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 485 msec 2024-12-11T23:00:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T23:00:27,536 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T23:00:27,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:00:27,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:00:27,539 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T23:00:27,540 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:00:27,540 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:00:27,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T23:00:27,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:00:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:27,545 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:00:27,545 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-11T23:00:27,546 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:00:27,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T23:00:27,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742193_1369 (size=399) 2024-12-11T23:00:27,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742193_1369 (size=399) 2024-12-11T23:00:27,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742193_1369 (size=399) 2024-12-11T23:00:27,565 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3060249aebb25ad453c68d2fbd779e9d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:27,568 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5ee67ce30ed17696dbc3fa4cfc5af3fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742194_1370 (size=85) 2024-12-11T23:00:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742194_1370 (size=85) 2024-12-11T23:00:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742194_1370 (size=85) 2024-12-11T23:00:27,609 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:27,609 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 3060249aebb25ad453c68d2fbd779e9d, disabling compactions & flushes 2024-12-11T23:00:27,609 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,610 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,610 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. after waiting 0 ms 2024-12-11T23:00:27,610 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 
2024-12-11T23:00:27,610 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,610 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3060249aebb25ad453c68d2fbd779e9d: Waiting for close lock at 1733958027609Disabling compacts and flushes for region at 1733958027609Disabling writes for close at 1733958027610 (+1 ms)Writing region close event to WAL at 1733958027610Closed at 1733958027610 2024-12-11T23:00:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742195_1371 (size=85) 2024-12-11T23:00:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742195_1371 (size=85) 2024-12-11T23:00:27,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742195_1371 (size=85) 2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 5ee67ce30ed17696dbc3fa4cfc5af3fe, disabling compactions & flushes 2024-12-11T23:00:27,632 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. after waiting 0 ms 2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,632 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 
2024-12-11T23:00:27,632 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5ee67ce30ed17696dbc3fa4cfc5af3fe: Waiting for close lock at 1733958027632Disabling compacts and flushes for region at 1733958027632Disabling writes for close at 1733958027632Writing region close event to WAL at 1733958027632Closed at 1733958027632 2024-12-11T23:00:27,634 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:00:27,634 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733958027634"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958027634"}]},"ts":"1733958027634"} 2024-12-11T23:00:27,635 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733958027634"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958027634"}]},"ts":"1733958027634"} 2024-12-11T23:00:27,637 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T23:00:27,639 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:00:27,639 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958027639"}]},"ts":"1733958027639"} 2024-12-11T23:00:27,641 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-11T23:00:27,642 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:00:27,644 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:00:27,644 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:00:27,644 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:00:27,644 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:00:27,644 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, ASSIGN}] 2024-12-11T23:00:27,646 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, ASSIGN 2024-12-11T23:00:27,646 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, ASSIGN 2024-12-11T23:00:27,647 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T23:00:27,647 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:00:27,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T23:00:27,797 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
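[Editor's note] The create request logged above (HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family, VERSIONS => '1') and the two regions it produces (STARTKEY '' to '2', and '2' to '') are driven through the public HBase client API. A minimal client-side sketch of that call, assuming an HBase 2.x+ classpath and a reachable cluster; the class name and connection/admin handles are illustrative, not code taken from the test source:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeSourceTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // One column family 'cf' with VERSIONS => '1', matching the descriptor in the log;
      // the remaining attributes are left at their defaults.
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key '2' yields the two regions seen in the RegionOpenAndInit
      // entries above: ['', '2') and ['2', '').
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("2") });
    }
  }
}
```

On the master side this request becomes the CreateTableProcedure (pid=143) and the two TransitRegionStateProcedure ASSIGN subprocedures (pid=144, pid=145) whose execution the following entries trace.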
2024-12-11T23:00:27,798 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=3060249aebb25ad453c68d2fbd779e9d, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:27,798 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=5ee67ce30ed17696dbc3fa4cfc5af3fe, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:27,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, ASSIGN because future has completed 2024-12-11T23:00:27,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:00:27,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, ASSIGN because future has completed 2024-12-11T23:00:27,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:27,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T23:00:27,958 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,959 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ee67ce30ed17696dbc3fa4cfc5af3fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.', STARTKEY => '2', ENDKEY => ''} 2024-12-11T23:00:27,959 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,959 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 3060249aebb25ad453c68d2fbd779e9d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.', STARTKEY => '', ENDKEY => '2'} 2024-12-11T23:00:27,959 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 
service=AccessControlService 2024-12-11T23:00:27,959 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. service=AccessControlService 2024-12-11T23:00:27,960 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:27,960 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,960 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,962 INFO [StoreOpener-5ee67ce30ed17696dbc3fa4cfc5af3fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,962 INFO [StoreOpener-3060249aebb25ad453c68d2fbd779e9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,964 INFO [StoreOpener-3060249aebb25ad453c68d2fbd779e9d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3060249aebb25ad453c68d2fbd779e9d columnFamilyName cf 2024-12-11T23:00:27,964 INFO [StoreOpener-5ee67ce30ed17696dbc3fa4cfc5af3fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ee67ce30ed17696dbc3fa4cfc5af3fe columnFamilyName cf 2024-12-11T23:00:27,964 DEBUG [StoreOpener-3060249aebb25ad453c68d2fbd779e9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,964 DEBUG [StoreOpener-5ee67ce30ed17696dbc3fa4cfc5af3fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:27,965 INFO [StoreOpener-5ee67ce30ed17696dbc3fa4cfc5af3fe-1 {}] regionserver.HStore(327): Store=5ee67ce30ed17696dbc3fa4cfc5af3fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:27,965 INFO [StoreOpener-3060249aebb25ad453c68d2fbd779e9d-1 {}] regionserver.HStore(327): Store=3060249aebb25ad453c68d2fbd779e9d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:27,965 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,965 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,966 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,966 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,966 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,966 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,967 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,967 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,967 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,967 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,968 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,968 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,970 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:27,970 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:27,971 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 3060249aebb25ad453c68d2fbd779e9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62902566, jitterRate=-0.06267872452735901}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:27,971 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 5ee67ce30ed17696dbc3fa4cfc5af3fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70008483, jitterRate=0.04320769011974335}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:27,971 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:27,971 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:27,971 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 3060249aebb25ad453c68d2fbd779e9d: Running coprocessor pre-open hook at 1733958027961Writing region info on filesystem at 1733958027961Initializing all the Stores at 1733958027962 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958027962Cleaning up temporary data from old regions at 1733958027967 (+5 ms)Running coprocessor post-open hooks at 1733958027971 (+4 ms)Region opened successfully at 1733958027971 2024-12-11T23:00:27,971 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 5ee67ce30ed17696dbc3fa4cfc5af3fe: Running coprocessor pre-open hook at 1733958027960Writing region info on filesystem at 1733958027960Initializing all the Stores at 1733958027962 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958027962Cleaning up temporary data from old regions at 1733958027967 (+5 ms)Running coprocessor post-open hooks at 1733958027971 (+4 ms)Region opened successfully at 1733958027971 2024-12-11T23:00:27,972 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe., pid=146, masterSystemTime=1733958027953 2024-12-11T23:00:27,972 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d., pid=147, masterSystemTime=1733958027954 2024-12-11T23:00:27,973 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,973 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:27,974 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=3060249aebb25ad453c68d2fbd779e9d, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:27,974 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,974 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:27,975 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=5ee67ce30ed17696dbc3fa4cfc5af3fe, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:27,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:27,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:00:27,978 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-11T23:00:27,978 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938 in 174 msec 2024-12-11T23:00:27,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-11T23:00:27,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023 in 176 msec 2024-12-11T23:00:27,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, ASSIGN in 334 msec 2024-12-11T23:00:27,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-11T23:00:27,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, ASSIGN in 335 msec 2024-12-11T23:00:27,983 
INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:00:27,983 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958027983"}]},"ts":"1733958027983"} 2024-12-11T23:00:27,985 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-11T23:00:27,986 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:00:27,986 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-11T23:00:27,990 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T23:00:27,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:27,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:27,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:27,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,007 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:28,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 463 msec 2024-12-11T23:00:28,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T23:00:28,175 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T23:00:28,178 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d., hostname=b762025f20c5,33867,1733957770938, seqNum=2] 2024-12-11T23:00:28,182 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe., hostname=b762025f20c5,41635,1733957771023, seqNum=2] 2024-12-11T23:00:28,184 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-11T23:00:28,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe] 2024-12-11T23:00:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe], force=true 2024-12-11T23:00:28,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe], force=true 2024-12-11T23:00:28,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe], force=true 2024-12-11T23:00:28,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe], force=true 2024-12-11T23:00:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T23:00:28,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, UNASSIGN}] 2024-12-11T23:00:28,214 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, UNASSIGN 2024-12-11T23:00:28,214 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, UNASSIGN 2024-12-11T23:00:28,215 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=3060249aebb25ad453c68d2fbd779e9d, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:28,215 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=5ee67ce30ed17696dbc3fa4cfc5af3fe, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:28,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, UNASSIGN because future has completed 2024-12-11T23:00:28,216 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:28,216 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:28,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, UNASSIGN because future has completed 2024-12-11T23:00:28,218 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:28,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:00:28,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T23:00:28,370 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:28,370 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T23:00:28,370 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 3060249aebb25ad453c68d2fbd779e9d, disabling compactions & flushes 2024-12-11T23:00:28,370 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:28,370 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 2024-12-11T23:00:28,370 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. after waiting 0 ms 2024-12-11T23:00:28,370 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 
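[Editor's note] The merge traced here begins with the client request logged above (HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe]); the resulting MergeTableRegionsProcedure (pid=148, force=true) first unassigns both parent regions, which is what the UNASSIGN/CloseRegionProcedure entries around this point show. A hedged sketch of the corresponding Admin call, assuming an open `admin` handle like the one in the earlier create-table sketch (not code from the test itself):

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

/** Issues the region merge seen above; 'admin' is an assumed open Admin handle. */
static void mergeParents(Admin admin) throws Exception {
  // Encoded region names copied from the log; forcible=true mirrors force=true
  // in the logged MergeTableRegionsProcedure (pid=148).
  Future<Void> merge = admin.mergeRegionsAsync(
      new byte[][] {
          Bytes.toBytes("3060249aebb25ad453c68d2fbd779e9d"),
          Bytes.toBytes("5ee67ce30ed17696dbc3fa4cfc5af3fe")
      },
      true);
  merge.get(); // block until the master-side merge procedure finishes
}
```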
2024-12-11T23:00:28,370 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 3060249aebb25ad453c68d2fbd779e9d 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T23:00:28,370 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:28,371 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T23:00:28,371 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 5ee67ce30ed17696dbc3fa4cfc5af3fe, disabling compactions & flushes 2024-12-11T23:00:28,371 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:28,371 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 2024-12-11T23:00:28,371 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. after waiting 0 ms 2024-12-11T23:00:28,371 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 
2024-12-11T23:00:28,371 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 5ee67ce30ed17696dbc3fa4cfc5af3fe 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T23:00:28,384 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/.tmp/cf/268b71fa39f4432eaa6f30c9c2200339 is 28, key is 1/cf:/1733958028179/Put/seqid=0 2024-12-11T23:00:28,388 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/.tmp/cf/6f3b45001843444497b46b16adaffe1a is 28, key is 2/cf:/1733958028183/Put/seqid=0 2024-12-11T23:00:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742196_1372 (size=4945) 2024-12-11T23:00:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742196_1372 (size=4945) 2024-12-11T23:00:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742196_1372 (size=4945) 2024-12-11T23:00:28,390 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/.tmp/cf/268b71fa39f4432eaa6f30c9c2200339 2024-12-11T23:00:28,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742197_1373 (size=4945) 2024-12-11T23:00:28,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742197_1373 (size=4945) 2024-12-11T23:00:28,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742197_1373 (size=4945) 2024-12-11T23:00:28,393 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/.tmp/cf/6f3b45001843444497b46b16adaffe1a 2024-12-11T23:00:28,394 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/.tmp/cf/268b71fa39f4432eaa6f30c9c2200339 as 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339 2024-12-11T23:00:28,398 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/.tmp/cf/6f3b45001843444497b46b16adaffe1a as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a 2024-12-11T23:00:28,399 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T23:00:28,400 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3060249aebb25ad453c68d2fbd779e9d in 30ms, sequenceid=5, compaction requested=false 2024-12-11T23:00:28,400 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-11T23:00:28,403 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T23:00:28,404 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 5ee67ce30ed17696dbc3fa4cfc5af3fe in 33ms, sequenceid=5, compaction requested=false 2024-12-11T23:00:28,405 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:00:28,405 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:28,406 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. 
2024-12-11T23:00:28,406 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 3060249aebb25ad453c68d2fbd779e9d: Waiting for close lock at 1733958028370Running coprocessor pre-close hooks at 1733958028370Disabling compacts and flushes for region at 1733958028370Disabling writes for close at 1733958028370Obtaining lock to block concurrent updates at 1733958028370Preparing flush snapshotting stores in 3060249aebb25ad453c68d2fbd779e9d at 1733958028370Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733958028370Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d. at 1733958028371 (+1 ms)Flushing 3060249aebb25ad453c68d2fbd779e9d/cf: creating writer at 1733958028371Flushing 3060249aebb25ad453c68d2fbd779e9d/cf: appending metadata at 1733958028384 (+13 ms)Flushing 3060249aebb25ad453c68d2fbd779e9d/cf: closing flushed file at 1733958028384Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28b44667: reopening flushed file at 1733958028394 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3060249aebb25ad453c68d2fbd779e9d in 30ms, sequenceid=5, compaction requested=false at 1733958028400 (+6 ms)Writing region close event to WAL at 1733958028402 (+2 ms)Running coprocessor post-close hooks at 1733958028405 (+3 ms)Closed at 1733958028406 (+1 ms) 2024-12-11T23:00:28,407 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:28,408 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=3060249aebb25ad453c68d2fbd779e9d, regionState=CLOSED 2024-12-11T23:00:28,408 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:00:28,409 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:28,409 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. 
2024-12-11T23:00:28,409 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 5ee67ce30ed17696dbc3fa4cfc5af3fe: Waiting for close lock at 1733958028371Running coprocessor pre-close hooks at 1733958028371Disabling compacts and flushes for region at 1733958028371Disabling writes for close at 1733958028371Obtaining lock to block concurrent updates at 1733958028371Preparing flush snapshotting stores in 5ee67ce30ed17696dbc3fa4cfc5af3fe at 1733958028371Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733958028371Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe. at 1733958028371Flushing 5ee67ce30ed17696dbc3fa4cfc5af3fe/cf: creating writer at 1733958028371Flushing 5ee67ce30ed17696dbc3fa4cfc5af3fe/cf: appending metadata at 1733958028388 (+17 ms)Flushing 5ee67ce30ed17696dbc3fa4cfc5af3fe/cf: closing flushed file at 1733958028388Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@211d9065: reopening flushed file at 1733958028397 (+9 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 5ee67ce30ed17696dbc3fa4cfc5af3fe in 33ms, sequenceid=5, compaction requested=false at 1733958028404 (+7 ms)Writing region close event to WAL at 1733958028405 (+1 ms)Running coprocessor post-close hooks at 1733958028409 (+4 ms)Closed at 1733958028409 2024-12-11T23:00:28,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:28,411 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:28,411 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=5ee67ce30ed17696dbc3fa4cfc5af3fe, regionState=CLOSED 2024-12-11T23:00:28,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-11T23:00:28,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 3060249aebb25ad453c68d2fbd779e9d, server=b762025f20c5,33867,1733957770938 in 194 msec 2024-12-11T23:00:28,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3060249aebb25ad453c68d2fbd779e9d, UNASSIGN in 199 msec 2024-12-11T23:00:28,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:00:28,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-11T23:00:28,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, 
ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 5ee67ce30ed17696dbc3fa4cfc5af3fe, server=b762025f20c5,41635,1733957771023 in 196 msec 2024-12-11T23:00:28,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-12-11T23:00:28,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5ee67ce30ed17696dbc3fa4cfc5af3fe, UNASSIGN in 203 msec 2024-12-11T23:00:28,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742198_1374 (size=84) 2024-12-11T23:00:28,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742198_1374 (size=84) 2024-12-11T23:00:28,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742198_1374 (size=84) 2024-12-11T23:00:28,428 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:28,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742199_1375 (size=20) 2024-12-11T23:00:28,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742199_1375 (size=20) 2024-12-11T23:00:28,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742199_1375 (size=20) 2024-12-11T23:00:28,435 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742200_1376 (size=21) 2024-12-11T23:00:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742200_1376 (size=21) 2024-12-11T23:00:28,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742200_1376 (size=21) 2024-12-11T23:00:28,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742201_1377 (size=84) 2024-12-11T23:00:28,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742201_1377 (size=84) 2024-12-11T23:00:28,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742201_1377 (size=84) 2024-12-11T23:00:28,447 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:28,454 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-11T23:00:28,456 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027542.3060249aebb25ad453c68d2fbd779e9d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:28,456 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733958027542.5ee67ce30ed17696dbc3fa4cfc5af3fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:28,456 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:28,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, ASSIGN}] 2024-12-11T23:00:28,460 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, ASSIGN 2024-12-11T23:00:28,461 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, ASSIGN; state=MERGED, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:00:28,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T23:00:28,611 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-11T23:00:28,611 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=f7e2555a0bb1b75579ee19dc57dddecc, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:28,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, ASSIGN because future has completed 2024-12-11T23:00:28,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:28,770 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:28,770 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => f7e2555a0bb1b75579ee19dc57dddecc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.', STARTKEY => '', ENDKEY => ''} 2024-12-11T23:00:28,770 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. service=AccessControlService 2024-12-11T23:00:28,771 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:00:28,771 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,771 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:28,771 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,771 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,773 INFO [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,774 INFO [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7e2555a0bb1b75579ee19dc57dddecc columnFamilyName cf 2024-12-11T23:00:28,774 DEBUG [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:28,788 DEBUG [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/268b71fa39f4432eaa6f30c9c2200339.3060249aebb25ad453c68d2fbd779e9d->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339-top 2024-12-11T23:00:28,792 DEBUG [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/6f3b45001843444497b46b16adaffe1a.5ee67ce30ed17696dbc3fa4cfc5af3fe->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a-top 2024-12-11T23:00:28,793 INFO [StoreOpener-f7e2555a0bb1b75579ee19dc57dddecc-1 {}] regionserver.HStore(327): Store=f7e2555a0bb1b75579ee19dc57dddecc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:28,793 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,793 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,794 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,795 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,795 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,796 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,797 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened f7e2555a0bb1b75579ee19dc57dddecc; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61651894, jitterRate=-0.08131518959999084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:28,797 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,797 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for f7e2555a0bb1b75579ee19dc57dddecc: Running coprocessor pre-open hook at 1733958028771Writing region info on filesystem at 1733958028771Initializing all the Stores at 1733958028772 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958028772Cleaning up temporary data from old regions at 1733958028795 (+23 ms)Running coprocessor post-open hooks at 1733958028797 (+2 ms)Region opened successfully at 1733958028797 2024-12-11T23:00:28,798 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc., pid=154, masterSystemTime=1733958028765 2024-12-11T23:00:28,798 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.,because compaction is disabled. 2024-12-11T23:00:28,800 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:28,800 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:28,800 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=f7e2555a0bb1b75579ee19dc57dddecc, regionState=OPEN, openSeqNum=9, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:28,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:28,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-11T23:00:28,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938 in 189 msec 2024-12-11T23:00:28,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-11T23:00:28,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, ASSIGN in 344 msec 2024-12-11T23:00:28,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3060249aebb25ad453c68d2fbd779e9d, 5ee67ce30ed17696dbc3fa4cfc5af3fe], force=true in 605 msec 2024-12-11T23:00:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T23:00:28,835 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T23:00:28,835 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T23:00:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958028835 (current time:1733958028835). 2024-12-11T23:00:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:00:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-11T23:00:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@94186fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:28,837 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:28,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:28,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:28,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5147662c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:28,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:28,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,838 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:28,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14d9c44f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:28,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:28,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:28,841 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:28,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:00:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,843 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T23:00:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@312b4ffd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:28,844 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:28,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:28,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:28,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56067962, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:28,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:28,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,845 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:28,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dca0060, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:28,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:28,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:28,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T23:00:28,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:28,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:28,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:28,851 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:00:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:28,852 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:28,852 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T23:00:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:00:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T23:00:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T23:00:28,854 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T23:00:28,855 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:28,857 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:28,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742202_1378 (size=216) 2024-12-11T23:00:28,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742202_1378 (size=216) 2024-12-11T23:00:28,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742202_1378 (size=216) 2024-12-11T23:00:28,865 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:28,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc}] 2024-12-11T23:00:28,866 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:28,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T23:00:29,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-11T23:00:29,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:29,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for f7e2555a0bb1b75579ee19dc57dddecc: 2024-12-11T23:00:29,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-11T23:00:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/268b71fa39f4432eaa6f30c9c2200339.3060249aebb25ad453c68d2fbd779e9d->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339-top, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/6f3b45001843444497b46b16adaffe1a.5ee67ce30ed17696dbc3fa4cfc5af3fe->hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a-top] hfiles 2024-12-11T23:00:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/268b71fa39f4432eaa6f30c9c2200339.3060249aebb25ad453c68d2fbd779e9d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/6f3b45001843444497b46b16adaffe1a.5ee67ce30ed17696dbc3fa4cfc5af3fe for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742203_1379 (size=269) 2024-12-11T23:00:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742203_1379 (size=269) 2024-12-11T23:00:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742203_1379 (size=269) 2024-12-11T23:00:29,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 
2024-12-11T23:00:29,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-11T23:00:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-11T23:00:29,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:29,028 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:29,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-11T23:00:29,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc in 164 msec 2024-12-11T23:00:29,030 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:29,030 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:29,031 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:29,031 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,031 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742204_1380 (size=670) 2024-12-11T23:00:29,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742204_1380 (size=670) 2024-12-11T23:00:29,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742204_1380 (size=670) 2024-12-11T23:00:29,042 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:29,047 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:29,047 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,049 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:29,049 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T23:00:29,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 196 msec 2024-12-11T23:00:29,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T23:00:29,175 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T23:00:29,175 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175 2024-12-11T23:00:29,175 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:29,201 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:29,201 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,202 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:00:29,206 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:29,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742205_1381 (size=216) 2024-12-11T23:00:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742205_1381 (size=216) 2024-12-11T23:00:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742206_1382 (size=670) 2024-12-11T23:00:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742205_1381 (size=216) 2024-12-11T23:00:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742206_1382 (size=670) 2024-12-11T23:00:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742206_1382 (size=670) 2024-12-11T23:00:29,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:29,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:29,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:29,454 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0006_000001 (auth:SIMPLE) from 127.0.0.1:37458 2024-12-11T23:00:29,463 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000001/launch_container.sh] 2024-12-11T23:00:29,463 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000001/container_tokens] 2024-12-11T23:00:29,463 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0006/container_1733957780200_0006_01_000001/sysfs] 2024-12-11T23:00:30,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:30,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T23:00:30,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:30,120 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-11T23:00:30,121 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T23:00:30,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-905083451083103256.jar 2024-12-11T23:00:30,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-14781048868627151136.jar 2024-12-11T23:00:30,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:00:30,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:00:30,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:00:30,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T23:00:30,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:00:30,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:00:30,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:00:30,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:00:30,254 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:00:30,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:00:30,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:00:30,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:00:30,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:30,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:30,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:30,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:30,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:00:30,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:30,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:00:30,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742207_1383 (size=131440) 2024-12-11T23:00:30,354 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742207_1383 (size=131440) 2024-12-11T23:00:30,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742207_1383 (size=131440) 2024-12-11T23:00:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742208_1384 (size=4188619) 2024-12-11T23:00:30,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742208_1384 (size=4188619) 2024-12-11T23:00:30,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742208_1384 (size=4188619) 2024-12-11T23:00:30,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742209_1385 (size=1323991) 2024-12-11T23:00:30,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742209_1385 (size=1323991) 2024-12-11T23:00:30,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742209_1385 (size=1323991) 2024-12-11T23:00:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742210_1386 (size=903541) 2024-12-11T23:00:30,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742210_1386 (size=903541) 2024-12-11T23:00:30,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742210_1386 (size=903541) 2024-12-11T23:00:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742211_1387 (size=8360360) 2024-12-11T23:00:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742211_1387 (size=8360360) 2024-12-11T23:00:30,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742211_1387 (size=8360360) 2024-12-11T23:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742212_1388 (size=1877034) 2024-12-11T23:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742212_1388 (size=1877034) 2024-12-11T23:00:30,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742212_1388 (size=1877034) 2024-12-11T23:00:30,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742213_1389 (size=77835) 2024-12-11T23:00:30,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742213_1389 (size=77835) 2024-12-11T23:00:30,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742213_1389 (size=77835) 2024-12-11T23:00:30,562 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742214_1390 (size=30949) 2024-12-11T23:00:30,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742214_1390 (size=30949) 2024-12-11T23:00:30,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742214_1390 (size=30949) 2024-12-11T23:00:30,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742215_1391 (size=1597213) 2024-12-11T23:00:30,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742215_1391 (size=1597213) 2024-12-11T23:00:30,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742215_1391 (size=1597213) 2024-12-11T23:00:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742216_1392 (size=4695811) 2024-12-11T23:00:30,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742216_1392 (size=4695811) 2024-12-11T23:00:30,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742216_1392 (size=4695811) 2024-12-11T23:00:30,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742217_1393 (size=232957) 2024-12-11T23:00:30,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742217_1393 (size=232957) 2024-12-11T23:00:30,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742217_1393 (size=232957) 2024-12-11T23:00:30,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742218_1394 (size=127628) 2024-12-11T23:00:30,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742218_1394 (size=127628) 2024-12-11T23:00:30,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742218_1394 (size=127628) 2024-12-11T23:00:30,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742219_1395 (size=20406) 2024-12-11T23:00:30,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742219_1395 (size=20406) 2024-12-11T23:00:30,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742219_1395 (size=20406) 2024-12-11T23:00:30,909 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:00:30,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742220_1396 (size=6426222) 2024-12-11T23:00:30,999 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742220_1396 (size=6426222) 2024-12-11T23:00:31,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742220_1396 (size=6426222) 2024-12-11T23:00:31,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T23:00:31,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T23:00:31,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T23:00:31,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742222_1398 (size=217634) 2024-12-11T23:00:31,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742222_1398 (size=217634) 2024-12-11T23:00:31,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742222_1398 (size=217634) 2024-12-11T23:00:31,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742223_1399 (size=1832290) 2024-12-11T23:00:31,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742223_1399 (size=1832290) 2024-12-11T23:00:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742223_1399 (size=1832290) 2024-12-11T23:00:31,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742224_1400 (size=322274) 2024-12-11T23:00:31,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742224_1400 (size=322274) 2024-12-11T23:00:31,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742224_1400 (size=322274) 2024-12-11T23:00:31,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742225_1401 (size=503880) 2024-12-11T23:00:31,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742225_1401 (size=503880) 2024-12-11T23:00:31,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742225_1401 (size=503880) 2024-12-11T23:00:31,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742226_1402 (size=443172) 2024-12-11T23:00:31,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742226_1402 (size=443172) 2024-12-11T23:00:31,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742226_1402 (size=443172) 
2024-12-11T23:00:31,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742227_1403 (size=29229) 2024-12-11T23:00:31,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742227_1403 (size=29229) 2024-12-11T23:00:31,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742227_1403 (size=29229) 2024-12-11T23:00:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742228_1404 (size=24096) 2024-12-11T23:00:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742228_1404 (size=24096) 2024-12-11T23:00:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742228_1404 (size=24096) 2024-12-11T23:00:31,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742229_1405 (size=111872) 2024-12-11T23:00:31,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742229_1405 (size=111872) 2024-12-11T23:00:31,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742229_1405 (size=111872) 2024-12-11T23:00:31,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742230_1406 (size=45609) 2024-12-11T23:00:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742230_1406 (size=45609) 2024-12-11T23:00:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742230_1406 (size=45609) 2024-12-11T23:00:31,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742231_1407 (size=136454) 2024-12-11T23:00:31,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742231_1407 (size=136454) 2024-12-11T23:00:31,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742231_1407 (size=136454) 2024-12-11T23:00:31,537 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
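The "For class X, using jar Y" lines come from TableMapReduceUtil resolving each class the export job depends on to the jar that provides it and shipping those jars with the MapReduce job (the bursts of addStoredBlock entries above are most likely those staged jars being replicated in the mini DFS); the "No job jar file set" warning is expected when the driver classes are not packaged into a job jar, as in this test. A minimal sketch of that dependency-jar setup, not the test's own code, with a placeholder job name:

// Hedged sketch: how HBase MapReduce jobs get their dependency jars onto the cluster.
// The job name is a placeholder; only APIs shown here are the documented
// TableMapReduceUtil.addDependencyJars(Job) and Job.getInstance(Configuration, String).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jar-sketch");
    // Resolves HBase, ZooKeeper, shaded-thirdparty, metrics, OpenTelemetry, etc. classes
    // to their jars and adds them to the job's distributed cache, which is what produces
    // the "For class ..., using jar ..." log lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}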
2024-12-11T23:00:31,540 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-11T23:00:31,542 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-11T23:00:31,542 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-11T23:00:31,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742232_1408 (size=481) 2024-12-11T23:00:31,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742232_1408 (size=481) 2024-12-11T23:00:31,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742232_1408 (size=481) 2024-12-11T23:00:31,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742233_1409 (size=21) 2024-12-11T23:00:31,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742233_1409 (size=21) 2024-12-11T23:00:31,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742233_1409 (size=21) 2024-12-11T23:00:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742234_1410 (size=304053) 2024-12-11T23:00:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742234_1410 (size=304053) 2024-12-11T23:00:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742234_1410 (size=304053) 2024-12-11T23:00:31,690 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:00:31,690 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T23:00:32,354 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0007_000001 (auth:SIMPLE) from 127.0.0.1:40484 2024-12-11T23:00:37,281 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0007_000001 (auth:SIMPLE) from 127.0.0.1:40168 2024-12-11T23:00:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742235_1411 (size=349751) 2024-12-11T23:00:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742235_1411 (size=349751) 2024-12-11T23:00:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742235_1411 (size=349751) 2024-12-11T23:00:38,685 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:00:39,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0007_000001 (auth:SIMPLE) from 127.0.0.1:45436 2024-12-11T23:00:39,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0007_000001 (auth:SIMPLE) from 127.0.0.1:59524 2024-12-11T23:00:42,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742236_1412 (size=4945) 2024-12-11T23:00:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742236_1412 (size=4945) 2024-12-11T23:00:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742236_1412 (size=4945) 2024-12-11T23:00:42,796 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000002/launch_container.sh] 2024-12-11T23:00:42,796 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000002/container_tokens] 2024-12-11T23:00:42,796 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000002/sysfs] 2024-12-11T23:00:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742238_1414 (size=4945) 
2024-12-11T23:00:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742238_1414 (size=4945) 2024-12-11T23:00:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742238_1414 (size=4945) 2024-12-11T23:00:43,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742237_1413 (size=22243) 2024-12-11T23:00:43,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742237_1413 (size=22243) 2024-12-11T23:00:43,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742237_1413 (size=22243) 2024-12-11T23:00:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742239_1415 (size=482) 2024-12-11T23:00:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742239_1415 (size=482) 2024-12-11T23:00:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742239_1415 (size=482) 2024-12-11T23:00:43,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742240_1416 (size=22243) 2024-12-11T23:00:43,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742240_1416 (size=22243) 2024-12-11T23:00:43,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742240_1416 (size=22243) 2024-12-11T23:00:43,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000003/launch_container.sh] 2024-12-11T23:00:43,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000003/container_tokens] 2024-12-11T23:00:43,305 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000003/sysfs] 2024-12-11T23:00:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742241_1417 (size=349751) 2024-12-11T23:00:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742241_1417 (size=349751) 
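The export itself runs as a MapReduce job: the tool loads the snapshot's hfile list, computes splits (two splits of roughly 4.8 K each above), copies the referenced files into the target directory, then finalizes and verifies the exported snapshot. A minimal sketch of how such an export is typically launched, assuming the tool's documented -snapshot, -copy-to and -mappers options; the snapshot name and target URI below are placeholders, not values from this run:

// Hedged sketch: launching a snapshot export via the ExportSnapshot tool's Tool entry point.
// The snapshot name, NameNode URI and mapper count are illustrative placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the same copy-manifest / copy-files / finalize / verify sequence seen in the log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-example",
        "-copy-to", "hdfs://namenode:8020/hbase-export",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}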
2024-12-11T23:00:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742241_1417 (size=349751) 2024-12-11T23:00:44,815 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:00:44,829 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T23:00:44,837 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:44,837 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:00:44,838 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:00:44,838 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:44,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T23:00:44,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T23:00:44,839 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:44,840 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T23:00:44,840 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958029175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T23:00:44,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:44,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:44,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=157 2024-12-11T23:00:44,850 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958044850"}]},"ts":"1733958044850"} 2024-12-11T23:00:44,852 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-11T23:00:44,852 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-11T23:00:44,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-11T23:00:44,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, UNASSIGN}] 2024-12-11T23:00:44,856 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, UNASSIGN 2024-12-11T23:00:44,856 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=f7e2555a0bb1b75579ee19dc57dddecc, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:44,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, UNASSIGN because future has completed 2024-12-11T23:00:44,858 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:44,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:44,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T23:00:45,012 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:45,013 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:45,013 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing f7e2555a0bb1b75579ee19dc57dddecc, disabling compactions & flushes 2024-12-11T23:00:45,013 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:45,013 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:45,013 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. after waiting 0 ms 2024-12-11T23:00:45,013 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 2024-12-11T23:00:45,023 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-11T23:00:45,024 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:45,024 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc. 
2024-12-11T23:00:45,024 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for f7e2555a0bb1b75579ee19dc57dddecc: Waiting for close lock at 1733958045013Running coprocessor pre-close hooks at 1733958045013Disabling compacts and flushes for region at 1733958045013Disabling writes for close at 1733958045013Writing region close event to WAL at 1733958045015 (+2 ms)Running coprocessor post-close hooks at 1733958045024 (+9 ms)Closed at 1733958045024 2024-12-11T23:00:45,026 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:45,027 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=f7e2555a0bb1b75579ee19dc57dddecc, regionState=CLOSED 2024-12-11T23:00:45,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:45,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-11T23:00:45,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure f7e2555a0bb1b75579ee19dc57dddecc, server=b762025f20c5,33867,1733957770938 in 172 msec 2024-12-11T23:00:45,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-11T23:00:45,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f7e2555a0bb1b75579ee19dc57dddecc, UNASSIGN in 178 msec 2024-12-11T23:00:45,037 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-11T23:00:45,037 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 183 msec 2024-12-11T23:00:45,038 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958045038"}]},"ts":"1733958045038"} 2024-12-11T23:00:45,040 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-11T23:00:45,040 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-11T23:00:45,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 194 msec 2024-12-11T23:00:45,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T23:00:45,166 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T23:00:45,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,172 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,173 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,177 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:45,177 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:45,177 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:45,179 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/recovered.edits] 2024-12-11T23:00:45,179 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/recovered.edits] 2024-12-11T23:00:45,179 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/recovered.edits] 2024-12-11T23:00:45,192 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/268b71fa39f4432eaa6f30c9c2200339.3060249aebb25ad453c68d2fbd779e9d to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/268b71fa39f4432eaa6f30c9c2200339.3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:45,192 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/cf/6f3b45001843444497b46b16adaffe1a 2024-12-11T23:00:45,192 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/6f3b45001843444497b46b16adaffe1a.5ee67ce30ed17696dbc3fa4cfc5af3fe to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/cf/6f3b45001843444497b46b16adaffe1a.5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:45,192 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/cf/268b71fa39f4432eaa6f30c9c2200339 2024-12-11T23:00:45,195 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/recovered.edits/8.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe/recovered.edits/8.seqid 2024-12-11T23:00:45,196 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/recovered.edits/12.seqid to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc/recovered.edits/12.seqid 2024-12-11T23:00:45,196 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/recovered.edits/8.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d/recovered.edits/8.seqid 2024-12-11T23:00:45,196 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5ee67ce30ed17696dbc3fa4cfc5af3fe 2024-12-11T23:00:45,196 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f7e2555a0bb1b75579ee19dc57dddecc 2024-12-11T23:00:45,197 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3060249aebb25ad453c68d2fbd779e9d 2024-12-11T23:00:45,197 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-11T23:00:45,199 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,203 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-11T23:00:45,205 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-11T23:00:45,206 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,206 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-12-11T23:00:45,207 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958045206"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:45,209 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-11T23:00:45,209 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f7e2555a0bb1b75579ee19dc57dddecc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T23:00:45,209 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-11T23:00:45,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958045209"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:45,211 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-11T23:00:45,212 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 44 msec 2024-12-11T23:00:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,274 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T23:00:45,274 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T23:00:45,274 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
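The disable/delete sequence above is driven by two client calls: disabling the table schedules a CloseTableRegionsProcedure with per-region TransitRegionStateProcedure/CloseRegionProcedure children to unassign each region, and deleting it runs a DeleteTableProcedure that archives the region HFiles, removes the table's rows from hbase:meta and clears its ACL znode (the ZooKeeper NodeDeleted/NodeChildrenChanged events under /hbase/acl). A minimal client-side sketch, with a placeholder table name and default connection settings:

// Hedged sketch: the Admin calls that trigger the DisableTableProcedure and
// DeleteTableProcedure chains logged above. The table name is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("example-table");
      admin.disableTable(table); // master unassigns and closes the table's regions
      admin.deleteTable(table);  // master archives HFiles and cleans meta and ACL state
    }
  }
}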
2024-12-11T23:00:45,274 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-11T23:00:45,300 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:45,300 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:45,300 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 
\x03 \x04 2024-12-11T23:00:45,300 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,300 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:45,300 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T23:00:45,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T23:00:45,305 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958045305"}]},"ts":"1733958045305"} 2024-12-11T23:00:45,307 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-11T23:00:45,307 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-11T23:00:45,308 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-11T23:00:45,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, UNASSIGN}] 2024-12-11T23:00:45,309 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, UNASSIGN 2024-12-11T23:00:45,310 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, UNASSIGN 2024-12-11T23:00:45,310 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=f587accbc369eb56e08a93392794e478, 
regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:45,310 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ba62783214563f2306fda1446d349a11, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:00:45,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, UNASSIGN because future has completed 2024-12-11T23:00:45,311 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:45,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:45,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ba62783214563f2306fda1446d349a11, UNASSIGN because future has completed 2024-12-11T23:00:45,312 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:45,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:00:45,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T23:00:45,464 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close f587accbc369eb56e08a93392794e478 2024-12-11T23:00:45,464 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close ba62783214563f2306fda1446d349a11 2024-12-11T23:00:45,464 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:45,464 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing ba62783214563f2306fda1446d349a11, disabling compactions & flushes 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing f587accbc369eb56e08a93392794e478, disabling compactions & flushes 2024-12-11T23:00:45,465 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:45,465 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. after waiting 0 ms 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. after waiting 0 ms 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 2024-12-11T23:00:45,465 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:45,472 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:45,472 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:45,473 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:45,473 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11. 
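
The preceding entries trace DisableTableProcedure pid=162: the master schedules UNASSIGN TransitRegionStateProcedures for regions f587accbc369eb56e08a93392794e478 and ba62783214563f2306fda1446d349a11, dispatches CloseRegionProcedures to the hosting region servers, and each region is closed with compactions and flushes disabled and a recovered.edits/9.seqid marker written; the close of the second region completes just below. As a rough sketch (assumed names and setup, not the test code), a client drives this whole fan-out with a single async admin call, which only completes once the master-side procedure has finished:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // The returned future completes when the DisableTableProcedure (and its
      // per-region close subprocedures) has finished on the master.
      CompletableFuture<Void> disabled = admin.disableTable(table);
      disabled.join();
      // The table should now report DISABLED, matching the hbase:meta update
      // logged by the procedure.
      System.out.println("disabled = " + admin.isTableDisabled(table).get());
    }
  }
}
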
2024-12-11T23:00:45,473 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:45,473 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for ba62783214563f2306fda1446d349a11: Waiting for close lock at 1733958045465Running coprocessor pre-close hooks at 1733958045465Disabling compacts and flushes for region at 1733958045465Disabling writes for close at 1733958045465Writing region close event to WAL at 1733958045467 (+2 ms)Running coprocessor post-close hooks at 1733958045473 (+6 ms)Closed at 1733958045473 2024-12-11T23:00:45,473 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478. 2024-12-11T23:00:45,473 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for f587accbc369eb56e08a93392794e478: Waiting for close lock at 1733958045465Running coprocessor pre-close hooks at 1733958045465Disabling compacts and flushes for region at 1733958045465Disabling writes for close at 1733958045465Writing region close event to WAL at 1733958045467 (+2 ms)Running coprocessor post-close hooks at 1733958045473 (+6 ms)Closed at 1733958045473 2024-12-11T23:00:45,475 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed ba62783214563f2306fda1446d349a11 2024-12-11T23:00:45,475 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ba62783214563f2306fda1446d349a11, regionState=CLOSED 2024-12-11T23:00:45,476 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed f587accbc369eb56e08a93392794e478 2024-12-11T23:00:45,476 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=f587accbc369eb56e08a93392794e478, regionState=CLOSED 2024-12-11T23:00:45,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:00:45,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:45,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-11T23:00:45,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure ba62783214563f2306fda1446d349a11, server=b762025f20c5,41635,1733957771023 in 165 msec 2024-12-11T23:00:45,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, 
region=ba62783214563f2306fda1446d349a11, UNASSIGN in 170 msec 2024-12-11T23:00:45,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-11T23:00:45,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure f587accbc369eb56e08a93392794e478, server=b762025f20c5,33867,1733957770938 in 167 msec 2024-12-11T23:00:45,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-12-11T23:00:45,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f587accbc369eb56e08a93392794e478, UNASSIGN in 171 msec 2024-12-11T23:00:45,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-11T23:00:45,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 174 msec 2024-12-11T23:00:45,484 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958045484"}]},"ts":"1733958045484"} 2024-12-11T23:00:45,486 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-11T23:00:45,486 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-11T23:00:45,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 185 msec 2024-12-11T23:00:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T23:00:45,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T23:00:45,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,629 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,630 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; 
DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,635 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11 2024-12-11T23:00:45,635 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478 2024-12-11T23:00:45,637 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/recovered.edits] 2024-12-11T23:00:45,637 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/recovered.edits] 2024-12-11T23:00:45,647 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/cf/3898cb1a5a004b5bb7001a86a926ce40 2024-12-11T23:00:45,647 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/cf/927a5f6ad36c40f2a9a6e27fb6f4c25e 2024-12-11T23:00:45,651 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11/recovered.edits/9.seqid 2024-12-11T23:00:45,651 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478/recovered.edits/9.seqid 2024-12-11T23:00:45,652 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/ba62783214563f2306fda1446d349a11 2024-12-11T23:00:45,652 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithMergeRegion/f587accbc369eb56e08a93392794e478 2024-12-11T23:00:45,652 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-11T23:00:45,652 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-11T23:00:45,653 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-11T23:00:45,667 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211e3ba624f1cc64db38fa851fb455f0929_ba62783214563f2306fda1446d349a11 2024-12-11T23:00:45,667 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e2024121107927cf5edbe41988eaa6177c42aef39_f587accbc369eb56e08a93392794e478 2024-12-11T23:00:45,668 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-11T23:00:45,670 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,672 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-11T23:00:45,674 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-11T23:00:45,675 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,675 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-11T23:00:45,675 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958045675"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:45,676 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958045675"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:45,677 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:00:45,677 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f587accbc369eb56e08a93392794e478, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733958025771.f587accbc369eb56e08a93392794e478.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ba62783214563f2306fda1446d349a11, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733958025771.ba62783214563f2306fda1446d349a11.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:00:45,677 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
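
The HFileArchiver entries above show that DeleteTableProcedure pid=168 does not destroy data outright: the store files and recovered.edits of both regions, plus the table's MOB files, are moved under the archive/data/default tree before the region directories are removed and the rows are deleted from hbase:meta. The snapshot removals that appear a little further down ("delete name: ... type: DISABLED") are separate master RPCs; a hedged sketch of the corresponding client calls follows, with the snapshot names taken from the log and everything else (class name, Configuration setup) purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each call is one master RPC, logged as MasterRpcServices "delete name: ..."
      // followed by SnapshotManager deleting the snapshot.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}
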
2024-12-11T23:00:45,677 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958045677"}]},"ts":"9223372036854775807"} 2024-12-11T23:00:45,679 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-11T23:00:45,679 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 52 msec 2024-12-11T23:00:45,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T23:00:45,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T23:00:45,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T23:00:45,849 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-11T23:00:45,869 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,869 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T23:00:45,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T23:00:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T23:00:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:45,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-11T23:00:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:45,904 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=814 (was 805) 
Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:35185 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-772668505_1 at /127.0.0.1:57594 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46337 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-772668505_1 at /127.0.0.1:54882 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:57628 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:58142 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6422 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 135241) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:54898 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35185 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:46337 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 777) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=749 (was 766), ProcessCount=15 (was 18), AvailableMemoryMB=4667 (was 3266) - AvailableMemoryMB LEAK? - 2024-12-11T23:00:45,905 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-11T23:00:45,924 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=814, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=749, ProcessCount=15, AvailableMemoryMB=4667 2024-12-11T23:00:45,924 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-11T23:00:45,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:00:45,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:00:45,927 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:00:45,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-11T23:00:45,928 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:00:45,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=169 2024-12-11T23:00:45,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742242_1418 (size=443) 2024-12-11T23:00:45,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742242_1418 (size=443) 2024-12-11T23:00:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742242_1418 (size=443) 2024-12-11T23:00:45,936 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 880c0fb4b1e8ffad1f423d4e22137a95, NAME => 'testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:45,936 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 80ba19fc77334646b38565c176ea8e92, NAME => 'testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:45,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742243_1419 (size=68) 2024-12-11T23:00:45,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742243_1419 (size=68) 2024-12-11T23:00:45,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742244_1420 (size=68) 2024-12-11T23:00:45,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742244_1420 (size=68) 2024-12-11T23:00:45,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742244_1420 (size=68) 2024-12-11T23:00:45,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742243_1419 (size=68) 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated 
testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 880c0fb4b1e8ffad1f423d4e22137a95, disabling compactions & flushes 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 80ba19fc77334646b38565c176ea8e92, disabling compactions & flushes 2024-12-11T23:00:45,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:45,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. after waiting 0 ms 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. after waiting 0 ms 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:45,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:45,944 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 
2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 880c0fb4b1e8ffad1f423d4e22137a95: Waiting for close lock at 1733958045944Disabling compacts and flushes for region at 1733958045944Disabling writes for close at 1733958045944Writing region close event to WAL at 1733958045944Closed at 1733958045944 2024-12-11T23:00:45,944 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 80ba19fc77334646b38565c176ea8e92: Waiting for close lock at 1733958045944Disabling compacts and flushes for region at 1733958045944Disabling writes for close at 1733958045944Writing region close event to WAL at 1733958045944Closed at 1733958045944 2024-12-11T23:00:45,946 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:00:45,946 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733958045946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958045946"}]},"ts":"1733958045946"} 2024-12-11T23:00:45,946 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733958045946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958045946"}]},"ts":"1733958045946"} 2024-12-11T23:00:45,949 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
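For reference, the create-table request traced above (a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0' and VERSIONS => '1', split at row key '1') maps onto a client-side Admin call of roughly the following shape. This is only an illustrative sketch, not the test's actual code: the table and family names and split key are taken from the log, while the class name and the plain ZooKeeper-based connection setup are assumptions (a standard hbase-site.xml on the classpath).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {               // placeholder class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family 'cf': MOB enabled with threshold 0 and one version,
      // matching the column descriptor printed by CreateTableProcedure above.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // A single split point '1' yields the two regions ('' -> '1' and '1' -> '')
      // instantiated by the RegionOpenAndInit pool in the entries above.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}

The master then drives the same state machine that the log shows next: CREATE_TABLE_ASSIGN_REGIONS, region assignment, and the final transition of the table to ENABLED in hbase:meta.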
2024-12-11T23:00:45,950 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:00:45,950 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958045950"}]},"ts":"1733958045950"} 2024-12-11T23:00:45,952 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T23:00:45,952 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:00:45,953 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:00:45,953 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:00:45,953 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:00:45,953 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:00:45,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, ASSIGN}] 2024-12-11T23:00:45,954 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, ASSIGN 2024-12-11T23:00:45,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, ASSIGN 2024-12-11T23:00:45,955 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:00:45,955 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T23:00:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T23:00:46,106 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T23:00:46,107 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=880c0fb4b1e8ffad1f423d4e22137a95, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:00:46,107 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=80ba19fc77334646b38565c176ea8e92, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:46,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, ASSIGN because future has completed 2024-12-11T23:00:46,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:00:46,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, ASSIGN because future has completed 2024-12-11T23:00:46,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T23:00:46,270 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:46,270 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 880c0fb4b1e8ffad1f423d4e22137a95, NAME => 'testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:00:46,270 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. service=AccessControlService 2024-12-11T23:00:46,270 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 
2024-12-11T23:00:46,270 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 80ba19fc77334646b38565c176ea8e92, NAME => 'testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:00:46,270 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. service=AccessControlService 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,271 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,271 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,272 INFO [StoreOpener-880c0fb4b1e8ffad1f423d4e22137a95-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,273 INFO [StoreOpener-80ba19fc77334646b38565c176ea8e92-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,274 INFO [StoreOpener-880c0fb4b1e8ffad1f423d4e22137a95-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 880c0fb4b1e8ffad1f423d4e22137a95 columnFamilyName cf 2024-12-11T23:00:46,275 INFO [StoreOpener-80ba19fc77334646b38565c176ea8e92-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 80ba19fc77334646b38565c176ea8e92 columnFamilyName cf 2024-12-11T23:00:46,275 DEBUG [StoreOpener-880c0fb4b1e8ffad1f423d4e22137a95-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:46,275 DEBUG [StoreOpener-80ba19fc77334646b38565c176ea8e92-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:46,276 INFO [StoreOpener-880c0fb4b1e8ffad1f423d4e22137a95-1 {}] regionserver.HStore(327): Store=880c0fb4b1e8ffad1f423d4e22137a95/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:46,276 INFO [StoreOpener-80ba19fc77334646b38565c176ea8e92-1 {}] regionserver.HStore(327): Store=80ba19fc77334646b38565c176ea8e92/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:46,276 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,276 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,277 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,279 DEBUG 
[RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,279 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,281 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:46,281 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:46,282 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 880c0fb4b1e8ffad1f423d4e22137a95; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59077571, jitterRate=-0.11967559158802032}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:46,282 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 80ba19fc77334646b38565c176ea8e92; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67521943, jitterRate=0.006155356764793396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:46,282 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,282 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,282 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 880c0fb4b1e8ffad1f423d4e22137a95: Running coprocessor pre-open hook at 1733958046271Writing region info on filesystem at 1733958046271Initializing all the Stores at 1733958046272 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958046272Cleaning up temporary data from old regions at 1733958046277 (+5 ms)Running coprocessor post-open hooks at 1733958046282 (+5 ms)Region opened successfully at 1733958046282 2024-12-11T23:00:46,282 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 80ba19fc77334646b38565c176ea8e92: Running coprocessor pre-open 
hook at 1733958046271Writing region info on filesystem at 1733958046271Initializing all the Stores at 1733958046272 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958046272Cleaning up temporary data from old regions at 1733958046277 (+5 ms)Running coprocessor post-open hooks at 1733958046282 (+5 ms)Region opened successfully at 1733958046282 2024-12-11T23:00:46,283 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92., pid=173, masterSystemTime=1733958046265 2024-12-11T23:00:46,283 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95., pid=172, masterSystemTime=1733958046264 2024-12-11T23:00:46,285 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:46,285 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:46,286 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=880c0fb4b1e8ffad1f423d4e22137a95, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:00:46,286 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:46,286 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 
2024-12-11T23:00:46,287 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=80ba19fc77334646b38565c176ea8e92, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:46,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:00:46,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:46,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=170 2024-12-11T23:00:46,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731 in 178 msec 2024-12-11T23:00:46,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=171 2024-12-11T23:00:46,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938 in 177 msec 2024-12-11T23:00:46,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, ASSIGN in 337 msec 2024-12-11T23:00:46,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-11T23:00:46,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, ASSIGN in 338 msec 2024-12-11T23:00:46,293 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:00:46,294 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958046293"}]},"ts":"1733958046293"} 2024-12-11T23:00:46,295 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T23:00:46,296 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:00:46,296 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T23:00:46,298 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-12-11T23:00:46,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:46,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:46,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:46,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:46,353 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:46,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:46,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:46,354 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:46,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 427 msec 2024-12-11T23:00:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T23:00:46,556 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T23:00:46,556 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,562 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-11T23:00:46,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 
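The test utility above confirms two regions for the new table. Outside the test harness the same check can be made with the RegionLocator API; the sketch below is illustrative only (placeholder class name, default connection setup assumed), with the table name taken from the log.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionCountSketch {                  // placeholder class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Two locations are expected here: the ('' -> '1') and ('1' -> '') regions
      // assigned to the two region servers in the entries above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
      }
    }
  }
}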
2024-12-11T23:00:46,563 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:46,565 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,570 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,574 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,576 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T23:00:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958046576 (current time:1733958046576). 2024-12-11T23:00:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:00:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T23:00:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e852687, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:46,577 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:46,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:46,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:46,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3591b1cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:46,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:46,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,578 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:46,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@128fcd5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:46,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:46,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
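The connections being opened here are part of the snapshot request's access-control check: the master reads the hbase:acl entry for the table and finds the owner grant "jenkins: RWXCA" written when the table was created. A client can inspect the same entries through AccessControlClient. The following is a hedged sketch only (placeholder class name, default connection assumed); it presumes the AccessController coprocessor is active, as it is in this secure-export test.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadTableAclSketch {                 // placeholder class name
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Fetch the permissions stored in hbase:acl for the table; for the run
      // traced above this should include the owner entry "jenkins: RWXCA".
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot");
      perms.forEach(p -> System.out.println(p));
    }
  }
}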
2024-12-11T23:00:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,582 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76d4a054, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:46,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:46,583 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48298bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,585 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:46,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c5ffa95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:46,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:46,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,587 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47884, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:46,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,589 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
2024-12-11T23:00:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,590 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T23:00:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
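The snapshot request registered just below ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) corresponds to an Admin-side call of roughly the following shape. This is a hedged sketch rather than the test's actual code; the snapshot and table names come from the log, the class name and synchronous Admin usage are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {                 // placeholder class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, like the emptySnaptb0-* request in the log;
      // on the master this runs as a SnapshotProcedure (pid=174 below).
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}

The procedure then walks the states visible below: SNAPSHOT_PREPARE, SNAPSHOT_WRITE_SNAPSHOT_INFO, per-region SnapshotRegionProcedure subprocedures, the MOB region step, consolidation, verification, and completion.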
2024-12-11T23:00:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T23:00:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T23:00:46,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T23:00:46,592 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:46,593 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:46,595 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:46,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742245_1421 (size=170) 2024-12-11T23:00:46,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742245_1421 (size=170) 2024-12-11T23:00:46,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742245_1421 (size=170) 2024-12-11T23:00:46,602 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:46,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92}] 2024-12-11T23:00:46,603 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,603 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,695 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T23:00:46,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-11T23:00:46,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-11T23:00:46,756 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:46,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:46,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 80ba19fc77334646b38565c176ea8e92: 2024-12-11T23:00:46,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 880c0fb4b1e8ffad1f423d4e22137a95: 2024-12-11T23:00:46,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T23:00:46,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:00:46,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:00:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742246_1422 (size=71) 2024-12-11T23:00:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742246_1422 (size=71) 2024-12-11T23:00:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742247_1423 (size=71) 2024-12-11T23:00:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742247_1423 (size=71) 2024-12-11T23:00:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742247_1423 (size=71) 2024-12-11T23:00:46,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:46,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742246_1422 (size=71) 2024-12-11T23:00:46,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-11T23:00:46,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 
2024-12-11T23:00:46,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-11T23:00:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-11T23:00:46,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-11T23:00:46,770 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,770 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,770 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:46,770 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,772 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 in 169 msec 2024-12-11T23:00:46,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-11T23:00:46,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 in 169 msec 2024-12-11T23:00:46,772 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:46,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:46,774 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T23:00:46,774 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:00:46,774 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:46,774 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T23:00:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742248_1424 (size=63) 2024-12-11T23:00:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742248_1424 (size=63) 2024-12-11T23:00:46,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742248_1424 (size=63) 2024-12-11T23:00:46,781 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:46,781 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:00:46,782 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:00:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742249_1425 (size=653) 2024-12-11T23:00:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742249_1425 (size=653) 2024-12-11T23:00:46,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742249_1425 (size=653) 2024-12-11T23:00:46,792 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:46,796 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:46,796 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:00:46,798 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:46,798 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T23:00:46,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 207 msec 2024-12-11T23:00:46,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T23:00:46,905 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T23:00:46,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:46,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:46,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,917 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-11T23:00:46,917 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 
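A FLUSH-type snapshot such as emptySnaptb0-testExportExpiredSnapshot above is normally requested through the HBase Admin API; the master-side SnapshotProcedure (pid=174) and the client-side RawAsyncHBaseAdmin completion message are the two ends of that call. The sketch below is illustrative only and assumes a reachable cluster configuration on the classpath; the snapshot and table names are taken from this log, everything else is a placeholder rather than the test's actual code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // "type=FLUSH" in the snapshot description above corresponds to SnapshotType.FLUSH here.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}
```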
2024-12-11T23:00:46,917 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:46,918 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,923 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,929 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:46,932 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958046932 (current time:1733958046932). 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dc0ffc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:46,933 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:46,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:46,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:46,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2493b8fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-11T23:00:46,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:46,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:46,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,935 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37948, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:46,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50a53dc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:46,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:46,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,938 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,939 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:00:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,939 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73df92db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:46,941 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68ff0a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:46,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,942 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:46,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b938b98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:46,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:46,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:46,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,945 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:46,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:46,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:46,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:00:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:46,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T23:00:46,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
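The acl read above (entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]) is the kind of row a prior grant leaves in hbase:acl, which SnapshotDescriptionUtils then copies into the snapshot description. The grant itself is not part of this log; the sketch below only mirrors the user and actions from the logged entry and is a hedged illustration, not the test's setup code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // "RWXCA" in the logged acl entry maps to READ, WRITE, EXEC, CREATE, ADMIN.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```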
2024-12-11T23:00:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T23:00:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T23:00:46,951 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:46,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T23:00:46,952 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:46,954 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:46,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742250_1426 (size=165) 2024-12-11T23:00:46,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742250_1426 (size=165) 2024-12-11T23:00:46,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742250_1426 (size=165) 2024-12-11T23:00:46,960 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:46,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92}] 2024-12-11T23:00:46,962 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:46,962 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:47,055 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T23:00:47,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-11T23:00:47,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-11T23:00:47,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:47,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:47,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 880c0fb4b1e8ffad1f423d4e22137a95 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T23:00:47,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 80ba19fc77334646b38565c176ea8e92 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T23:00:47,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 is 71, key is 04b91932e17bb8f161c3632be615c66c/cf:q/1733958046912/Put/seqid=0 2024-12-11T23:00:47,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 is 71, key is 17263dc8b5e9188041c1d3462be1459f/cf:q/1733958046913/Put/seqid=0 2024-12-11T23:00:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742251_1427 (size=5242) 2024-12-11T23:00:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742251_1427 (size=5242) 2024-12-11T23:00:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742252_1428 (size=8032) 2024-12-11T23:00:47,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742251_1427 (size=5242) 2024-12-11T23:00:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742252_1428 (size=8032) 2024-12-11T23:00:47,142 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742252_1428 (size=8032) 2024-12-11T23:00:47,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:47,142 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:47,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:47,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:47,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/.tmp/cf/4b922f8070b448f2a7611c5b6f559fc6, store: [table=testtb-testExportExpiredSnapshot family=cf region=80ba19fc77334646b38565c176ea8e92] 2024-12-11T23:00:47,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/.tmp/cf/fa26a807a33f44f481ada99213722a57, store: [table=testtb-testExportExpiredSnapshot family=cf region=880c0fb4b1e8ffad1f423d4e22137a95] 2024-12-11T23:00:47,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/.tmp/cf/4b922f8070b448f2a7611c5b6f559fc6 is 209, key is 11bbd34b0e91542ac8d52cab1a5cd00c9/cf:q/1733958046913/Put/seqid=0 2024-12-11T23:00:47,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/.tmp/cf/fa26a807a33f44f481ada99213722a57 is 209, key is 0ac052511f332d72a84298880d6f44d2d/cf:q/1733958046912/Put/seqid=0 2024-12-11T23:00:47,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742254_1430 (size=14589) 2024-12-11T23:00:47,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742254_1430 (size=14589) 2024-12-11T23:00:47,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742253_1429 (size=6328) 2024-12-11T23:00:47,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742253_1429 (size=6328) 2024-12-11T23:00:47,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742254_1430 (size=14589) 2024-12-11T23:00:47,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742253_1429 (size=6328) 2024-12-11T23:00:47,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/.tmp/cf/4b922f8070b448f2a7611c5b6f559fc6 2024-12-11T23:00:47,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/.tmp/cf/fa26a807a33f44f481ada99213722a57 2024-12-11T23:00:47,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/.tmp/cf/4b922f8070b448f2a7611c5b6f559fc6 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6 2024-12-11T23:00:47,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/.tmp/cf/fa26a807a33f44f481ada99213722a57 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57 2024-12-11T23:00:47,167 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57, entries=5, sequenceid=6, filesize=6.2 K 2024-12-11T23:00:47,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6, entries=45, sequenceid=6, filesize=14.2 K 2024-12-11T23:00:47,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 880c0fb4b1e8ffad1f423d4e22137a95 in 53ms, sequenceid=6, compaction requested=false 2024-12-11T23:00:47,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 80ba19fc77334646b38565c176ea8e92 in 53ms, sequenceid=6, compaction requested=false 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 880c0fb4b1e8ffad1f423d4e22137a95: 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 80ba19fc77334646b38565c176ea8e92: 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57] hfiles 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6] hfiles 2024-12-11T23:00:47,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742256_1432 (size=110) 2024-12-11T23:00:47,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742256_1432 (size=110) 2024-12-11T23:00:47,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742256_1432 (size=110) 2024-12-11T23:00:47,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742255_1431 (size=110) 2024-12-11T23:00:47,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 
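The flushes above wrote both ordinary store files under .../data/default/testtb-testExportExpiredSnapshot/... and MOB files under .../mobdir/..., so family cf on this table is MOB-enabled and the snapshot manifest references both kinds of hfiles. A minimal write-and-flush sketch follows; the row key and value are placeholders, and only the table name and the cf:q column are taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndFlush {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("row-1"));  // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
      // A FLUSH-type snapshot triggers the same memstore flush seen in the log;
      // an explicit flush, as here, persists the cells (MOB data goes to the mob directory).
      admin.flush(tn);
    }
  }
}
```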
2024-12-11T23:00:47,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-11T23:00:47,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742255_1431 (size=110) 2024-12-11T23:00:47,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-11T23:00:47,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742255_1431 (size=110) 2024-12-11T23:00:47,182 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:47,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:47,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-11T23:00:47,182 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:47,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-11T23:00:47,183 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:47,183 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:47,184 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 80ba19fc77334646b38565c176ea8e92 in 222 msec 2024-12-11T23:00:47,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-11T23:00:47,185 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:47,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95 in 222 msec 2024-12-11T23:00:47,185 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:47,186 DEBUG 
[MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T23:00:47,186 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:00:47,186 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:47,187 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95] hfiles 2024-12-11T23:00:47,187 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:47,187 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:47,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742257_1433 (size=294) 2024-12-11T23:00:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742257_1433 (size=294) 2024-12-11T23:00:47,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742257_1433 (size=294) 2024-12-11T23:00:47,193 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:47,193 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,193 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742258_1434 (size=963) 2024-12-11T23:00:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39843 is added to blk_1073742258_1434 (size=963) 2024-12-11T23:00:47,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742258_1434 (size=963) 2024-12-11T23:00:47,208 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:47,212 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:47,212 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-11T23:00:47,213 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:47,214 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T23:00:47,214 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 264 msec 2024-12-11T23:00:47,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T23:00:47,265 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T23:00:47,266 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:00:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-11T23:00:47,267 
INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:00:47,268 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-11T23:00:47,268 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:00:47,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742259_1435 (size=436) 2024-12-11T23:00:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742259_1435 (size=436) 2024-12-11T23:00:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742259_1435 (size=436) 2024-12-11T23:00:47,279 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 189b562ad19e67a2b978aec9d57dad01, NAME => 'testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:47,279 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f2c62d44aedcf51b4978173adc0395ae, NAME => 'testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742261_1437 (size=61) 2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742260_1436 (size=61) 
2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742261_1437 (size=61) 2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742260_1436 (size=61) 2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742261_1437 (size=61) 2024-12-11T23:00:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742260_1436 (size=61) 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 189b562ad19e67a2b978aec9d57dad01, disabling compactions & flushes 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing f2c62d44aedcf51b4978173adc0395ae, disabling compactions & flushes 2024-12-11T23:00:47,287 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:47,287 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. after waiting 0 ms 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. after waiting 0 ms 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 
2024-12-11T23:00:47,287 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:47,287 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for f2c62d44aedcf51b4978173adc0395ae: Waiting for close lock at 1733958047287Disabling compacts and flushes for region at 1733958047287Disabling writes for close at 1733958047287Writing region close event to WAL at 1733958047287Closed at 1733958047287 2024-12-11T23:00:47,287 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 189b562ad19e67a2b978aec9d57dad01: Waiting for close lock at 1733958047287Disabling compacts and flushes for region at 1733958047287Disabling writes for close at 1733958047287Writing region close event to WAL at 1733958047287Closed at 1733958047287 2024-12-11T23:00:47,288 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:00:47,289 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733958047289"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958047289"}]},"ts":"1733958047289"} 2024-12-11T23:00:47,289 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733958047289"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958047289"}]},"ts":"1733958047289"} 2024-12-11T23:00:47,291 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-11T23:00:47,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:00:47,291 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958047291"}]},"ts":"1733958047291"} 2024-12-11T23:00:47,293 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T23:00:47,293 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:00:47,294 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:00:47,294 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:00:47,294 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:00:47,294 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:00:47,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f2c62d44aedcf51b4978173adc0395ae, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=189b562ad19e67a2b978aec9d57dad01, ASSIGN}] 2024-12-11T23:00:47,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=189b562ad19e67a2b978aec9d57dad01, ASSIGN 2024-12-11T23:00:47,295 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f2c62d44aedcf51b4978173adc0395ae, ASSIGN 2024-12-11T23:00:47,296 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=189b562ad19e67a2b978aec9d57dad01, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:00:47,296 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f2c62d44aedcf51b4978173adc0395ae, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T23:00:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:47,446 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T23:00:47,447 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=189b562ad19e67a2b978aec9d57dad01, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:47,447 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=f2c62d44aedcf51b4978173adc0395ae, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:00:47,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=189b562ad19e67a2b978aec9d57dad01, ASSIGN because future has completed 2024-12-11T23:00:47,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 189b562ad19e67a2b978aec9d57dad01, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:47,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f2c62d44aedcf51b4978173adc0395ae, ASSIGN because future has completed 2024-12-11T23:00:47,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure f2c62d44aedcf51b4978173adc0395ae, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:00:47,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:47,610 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:47,610 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 
2024-12-11T23:00:47,610 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 189b562ad19e67a2b978aec9d57dad01, NAME => 'testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:00:47,610 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => f2c62d44aedcf51b4978173adc0395ae, NAME => 'testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:00:47,610 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. service=AccessControlService 2024-12-11T23:00:47,610 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. service=AccessControlService 2024-12-11T23:00:47,611 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:47,611 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 
f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,611 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,613 INFO [StoreOpener-189b562ad19e67a2b978aec9d57dad01-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,613 INFO [StoreOpener-f2c62d44aedcf51b4978173adc0395ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,614 INFO [StoreOpener-f2c62d44aedcf51b4978173adc0395ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f2c62d44aedcf51b4978173adc0395ae columnFamilyName cf 2024-12-11T23:00:47,614 INFO [StoreOpener-189b562ad19e67a2b978aec9d57dad01-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 189b562ad19e67a2b978aec9d57dad01 columnFamilyName cf 2024-12-11T23:00:47,615 DEBUG [StoreOpener-189b562ad19e67a2b978aec9d57dad01-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:47,615 INFO [StoreOpener-189b562ad19e67a2b978aec9d57dad01-1 {}] regionserver.HStore(327): Store=189b562ad19e67a2b978aec9d57dad01/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:47,615 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,616 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,616 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,616 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,616 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,617 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,690 DEBUG [StoreOpener-f2c62d44aedcf51b4978173adc0395ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:47,692 INFO [StoreOpener-f2c62d44aedcf51b4978173adc0395ae-1 {}] regionserver.HStore(327): Store=f2c62d44aedcf51b4978173adc0395ae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:00:47,692 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:47,693 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,693 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 189b562ad19e67a2b978aec9d57dad01; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60650324, jitterRate=-0.09623974561691284}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:47,693 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:47,694 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,695 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,695 DEBUG 
[RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 189b562ad19e67a2b978aec9d57dad01: Running coprocessor pre-open hook at 1733958047611Writing region info on filesystem at 1733958047611Initializing all the Stores at 1733958047612 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958047613 (+1 ms)Cleaning up temporary data from old regions at 1733958047616 (+3 ms)Running coprocessor post-open hooks at 1733958047693 (+77 ms)Region opened successfully at 1733958047695 (+2 ms) 2024-12-11T23:00:47,695 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,695 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,696 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01., pid=183, masterSystemTime=1733958047604 2024-12-11T23:00:47,698 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,698 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:47,698 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 
2024-12-11T23:00:47,699 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=189b562ad19e67a2b978aec9d57dad01, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:47,700 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:00:47,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 189b562ad19e67a2b978aec9d57dad01, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:00:47,701 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened f2c62d44aedcf51b4978173adc0395ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66998405, jitterRate=-0.0016459673643112183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:00:47,701 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:47,701 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for f2c62d44aedcf51b4978173adc0395ae: Running coprocessor pre-open hook at 1733958047611Writing region info on filesystem at 1733958047611Initializing all the Stores at 1733958047612 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958047613 (+1 ms)Cleaning up temporary data from old regions at 1733958047695 (+82 ms)Running coprocessor post-open hooks at 1733958047701 (+6 ms)Region opened successfully at 1733958047701 2024-12-11T23:00:47,702 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae., pid=184, masterSystemTime=1733958047607 2024-12-11T23:00:47,703 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:47,704 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 
2024-12-11T23:00:47,704 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=f2c62d44aedcf51b4978173adc0395ae, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:00:47,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-11T23:00:47,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 189b562ad19e67a2b978aec9d57dad01, server=b762025f20c5,33867,1733957770938 in 251 msec 2024-12-11T23:00:47,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=189b562ad19e67a2b978aec9d57dad01, ASSIGN in 410 msec 2024-12-11T23:00:47,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure f2c62d44aedcf51b4978173adc0395ae, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:00:47,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-11T23:00:47,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure f2c62d44aedcf51b4978173adc0395ae, server=b762025f20c5,37563,1733957770731 in 254 msec 2024-12-11T23:00:47,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-11T23:00:47,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f2c62d44aedcf51b4978173adc0395ae, ASSIGN in 414 msec 2024-12-11T23:00:47,710 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:00:47,711 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958047710"}]},"ts":"1733958047710"} 2024-12-11T23:00:47,712 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T23:00:47,713 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:00:47,713 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T23:00:47,715 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T23:00:47,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:48,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:48,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:48,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:48,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:00:48,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:00:48,616 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 1.3460 sec 2024-12-11T23:00:49,416 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T23:00:49,416 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T23:00:49,416 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-11T23:00:49,422 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:49,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:49,426 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,430 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,436 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:49,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:00:49,448 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,450 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-11T23:00:49,450 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 
2024-12-11T23:00:49,450 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:00:49,452 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,455 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T23:00:49,460 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T23:00:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T23:00:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:00:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542764a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:49,461 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:49,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:49,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:49,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55654a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:49,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:49,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,462 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44564, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:49,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43abfe14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:49,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:49,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:49,464 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:49,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,465 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@115ee50e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:00:49,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:00:49,466 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79cc5064, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:00:49,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,467 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:00:49,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49074420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:00:49,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:00:49,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:00:49,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:49,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T23:00:49,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:00:49,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:00:49,473 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:00:49,474 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:00:49,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:00:49,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:00:49,474 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:00:49,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T23:00:49,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:00:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T23:00:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T23:00:49,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T23:00:49,477 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:00:49,477 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:00:49,479 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:00:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742262_1438 (size=152) 2024-12-11T23:00:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742262_1438 (size=152) 2024-12-11T23:00:49,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742262_1438 (size=152) 2024-12-11T23:00:49,485 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:00:49,485 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f2c62d44aedcf51b4978173adc0395ae}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 189b562ad19e67a2b978aec9d57dad01}] 2024-12-11T23:00:49,485 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:49,486 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:49,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T23:00:49,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-11T23:00:49,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-11T23:00:49,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:00:49,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 
2024-12-11T23:00:49,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing f2c62d44aedcf51b4978173adc0395ae 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T23:00:49,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 189b562ad19e67a2b978aec9d57dad01 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T23:00:49,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121181b7f57a7a0446cb8324dd3564f96b55_189b562ad19e67a2b978aec9d57dad01 is 71, key is 14b7e33e6e688d5898058c8e3aa3e314/cf:q/1733958049446/Put/seqid=0 2024-12-11T23:00:49,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121163678a8dfdeb44dfb3ee31e6a78a83c5_f2c62d44aedcf51b4978173adc0395ae is 71, key is 0185167a2de4c35a75acb367838d324a/cf:q/1733958049445/Put/seqid=0 2024-12-11T23:00:49,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742263_1439 (size=8101) 2024-12-11T23:00:49,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742263_1439 (size=8101) 2024-12-11T23:00:49,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742263_1439 (size=8101) 2024-12-11T23:00:49,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742264_1440 (size=5172) 2024-12-11T23:00:49,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:49,691 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-11T23:00:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742264_1440 (size=5172) 2024-12-11T23:00:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742264_1440 (size=5172) 2024-12-11T23:00:49,693 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:49,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121181b7f57a7a0446cb8324dd3564f96b55_189b562ad19e67a2b978aec9d57dad01 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024121181b7f57a7a0446cb8324dd3564f96b55_189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:49,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/.tmp/cf/ab308ab8a510464fbf7136b5621540a9, store: [table=testExportExpiredSnapshot family=cf region=189b562ad19e67a2b978aec9d57dad01] 2024-12-11T23:00:49,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/.tmp/cf/ab308ab8a510464fbf7136b5621540a9 is 202, key is 1a425f0d180584adc84fc3cf49bd52e00/cf:q/1733958049446/Put/seqid=0 2024-12-11T23:00:49,696 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121163678a8dfdeb44dfb3ee31e6a78a83c5_f2c62d44aedcf51b4978173adc0395ae to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e2024121163678a8dfdeb44dfb3ee31e6a78a83c5_f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:49,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/.tmp/cf/68af86b934964f6a976e5e8b88d60e92, store: [table=testExportExpiredSnapshot family=cf region=f2c62d44aedcf51b4978173adc0395ae] 2024-12-11T23:00:49,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/.tmp/cf/68af86b934964f6a976e5e8b88d60e92 is 202, key is 0081fa9557b6e86cff18bf4ab43c57a5e/cf:q/1733958049445/Put/seqid=0 2024-12-11T23:00:49,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742265_1441 (size=14463) 2024-12-11T23:00:49,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742265_1441 (size=14463) 2024-12-11T23:00:49,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742265_1441 (size=14463) 2024-12-11T23:00:49,700 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/.tmp/cf/ab308ab8a510464fbf7136b5621540a9 2024-12-11T23:00:49,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742266_1442 (size=6088) 2024-12-11T23:00:49,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742266_1442 (size=6088) 2024-12-11T23:00:49,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/.tmp/cf/ab308ab8a510464fbf7136b5621540a9 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/cf/ab308ab8a510464fbf7136b5621540a9 2024-12-11T23:00:49,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742266_1442 (size=6088) 2024-12-11T23:00:49,732 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/.tmp/cf/68af86b934964f6a976e5e8b88d60e92 2024-12-11T23:00:49,736 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/cf/ab308ab8a510464fbf7136b5621540a9, entries=46, sequenceid=5, filesize=14.1 K 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/.tmp/cf/68af86b934964f6a976e5e8b88d60e92 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/cf/68af86b934964f6a976e5e8b88d60e92 2024-12-11T23:00:49,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 189b562ad19e67a2b978aec9d57dad01 in 99ms, sequenceid=5, compaction requested=false 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 189b562ad19e67a2b978aec9d57dad01: 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. for snapshot-testExportExpiredSnapshot completed. 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/cf/ab308ab8a510464fbf7136b5621540a9] hfiles 2024-12-11T23:00:49,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/cf/ab308ab8a510464fbf7136b5621540a9 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,741 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/cf/68af86b934964f6a976e5e8b88d60e92, entries=4, sequenceid=5, filesize=5.9 K 2024-12-11T23:00:49,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742267_1443 (size=103) 2024-12-11T23:00:49,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742267_1443 (size=103) 2024-12-11T23:00:49,742 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for f2c62d44aedcf51b4978173adc0395ae in 104ms, sequenceid=5, compaction requested=false 2024-12-11T23:00:49,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742267_1443 (size=103) 2024-12-11T23:00:49,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for f2c62d44aedcf51b4978173adc0395ae: 2024-12-11T23:00:49,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. for snapshot-testExportExpiredSnapshot completed. 
2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/cf/68af86b934964f6a976e5e8b88d60e92] hfiles 2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-11T23:00:49,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/cf/68af86b934964f6a976e5e8b88d60e92 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-11T23:00:49,743 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:49,743 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:49,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 189b562ad19e67a2b978aec9d57dad01 in 259 msec 2024-12-11T23:00:49,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742268_1444 (size=103) 2024-12-11T23:00:49,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742268_1444 (size=103) 2024-12-11T23:00:49,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742268_1444 (size=103) 2024-12-11T23:00:49,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 
2024-12-11T23:00:49,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-11T23:00:49,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-11T23:00:49,749 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:49,749 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:49,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-11T23:00:49,751 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:00:49,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f2c62d44aedcf51b4978173adc0395ae in 264 msec 2024-12-11T23:00:49,751 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:00:49,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
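[Editor's note] Once the per-region and MOB-region sub-procedures finish and the SNAPSHOT_CONSOLIDATE_SNAPSHOT / SNAPSHOT_COMPLETE_SNAPSHOT states (logged just below) move the manifest out of .tmp, the snapshot becomes visible to clients. A small sketch, under the same client assumptions as above, of how one might confirm that from the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the completion state has run, snapshot-testExportExpiredSnapshot
      // should appear in this listing.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}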
2024-12-11T23:00:49,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:00:49,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:00:49,753 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024121181b7f57a7a0446cb8324dd3564f96b55_189b562ad19e67a2b978aec9d57dad01, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e2024121163678a8dfdeb44dfb3ee31e6a78a83c5_f2c62d44aedcf51b4978173adc0395ae] hfiles 2024-12-11T23:00:49,753 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024121181b7f57a7a0446cb8324dd3564f96b55_189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:00:49,753 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e2024121163678a8dfdeb44dfb3ee31e6a78a83c5_f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:00:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742269_1445 (size=287) 2024-12-11T23:00:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742269_1445 (size=287) 2024-12-11T23:00:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742269_1445 (size=287) 2024-12-11T23:00:49,760 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:00:49,760 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,761 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,765 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0007_000001 (auth:SIMPLE) from 127.0.0.1:52846 2024-12-11T23:00:49,774 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000001/launch_container.sh] 2024-12-11T23:00:49,774 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000001/container_tokens] 2024-12-11T23:00:49,774 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0007/container_1733957780200_0007_01_000001/sysfs] 2024-12-11T23:00:49,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742270_1446 (size=935) 2024-12-11T23:00:49,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742270_1446 (size=935) 2024-12-11T23:00:49,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742270_1446 (size=935) 2024-12-11T23:00:49,777 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:00:49,781 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:00:49,781 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-11T23:00:49,782 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:00:49,782 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T23:00:49,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ 
ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 307 msec 2024-12-11T23:00:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T23:00:49,795 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T23:00:50,544 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:00:50,563 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T23:00:50,563 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T23:00:50,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T23:00:50,564 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T23:00:50,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T23:00:50,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T23:00:56,066 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:00:59,803 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958059803 2024-12-11T23:00:59,803 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958059803, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958059803, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:59,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:00:59,828 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958059803, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958059803/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T23:00:59,830 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:00:59,831 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:00:59,832 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-11T23:00:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:00:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T23:00:59,835 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958059834"}]},"ts":"1733958059834"} 2024-12-11T23:00:59,836 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-11T23:00:59,836 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-11T23:00:59,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-11T23:00:59,839 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, UNASSIGN}] 2024-12-11T23:00:59,839 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, UNASSIGN 2024-12-11T23:00:59,839 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, UNASSIGN 2024-12-11T23:00:59,840 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=880c0fb4b1e8ffad1f423d4e22137a95, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:00:59,840 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=80ba19fc77334646b38565c176ea8e92, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:00:59,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, UNASSIGN because future has completed 2024-12-11T23:00:59,841 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:59,841 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:00:59,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, UNASSIGN because future has completed 2024-12-11T23:00:59,842 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:00:59,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:00:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T23:00:59,993 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:00:59,993 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:59,993 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 880c0fb4b1e8ffad1f423d4e22137a95, disabling compactions & flushes 2024-12-11T23:00:59,993 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:59,993 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 2024-12-11T23:00:59,993 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. after waiting 0 ms 2024-12-11T23:00:59,993 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 
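[Editor's note] The ERROR a few entries above (SnapshotTTLExpiredException thrown from ExportSnapshot.verifySnapshot) occurs because the export is attempted at 23:00:59, ten seconds after the ttl=10 snapshot completed at 23:00:49. A hedged sketch of the kind of ExportSnapshot invocation the stack trace shows being driven through ToolRunner; the -snapshot/-copy-to flags follow the standard ExportSnapshot usage, and the destination path here is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Mirrors the call chain in the stack trace: ToolRunner.run ->
    // AbstractHBaseTool.run -> ExportSnapshot.doWork -> verifySnapshot.
    // Once the TTL has lapsed, verifySnapshot throws
    // SnapshotTTLExpiredException; the tool logs it (as seen above) and the
    // run returns a non-zero exit code instead of copying anything.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:45807/tmp/export-target"  // illustrative target
    });
    System.out.println("ExportSnapshot exit code: " + rc);
  }
}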
2024-12-11T23:00:59,994 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:00:59,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:00:59,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 80ba19fc77334646b38565c176ea8e92, disabling compactions & flushes 2024-12-11T23:00:59,994 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:59,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:59,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. after waiting 0 ms 2024-12-11T23:00:59,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:00:59,997 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92. 2024-12-11T23:00:59,997 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95. 
2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 80ba19fc77334646b38565c176ea8e92: Waiting for close lock at 1733958059994Running coprocessor pre-close hooks at 1733958059994Disabling compacts and flushes for region at 1733958059994Disabling writes for close at 1733958059994Writing region close event to WAL at 1733958059995 (+1 ms)Running coprocessor post-close hooks at 1733958059997 (+2 ms)Closed at 1733958059997 2024-12-11T23:00:59,997 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 880c0fb4b1e8ffad1f423d4e22137a95: Waiting for close lock at 1733958059993Running coprocessor pre-close hooks at 1733958059993Disabling compacts and flushes for region at 1733958059993Disabling writes for close at 1733958059993Writing region close event to WAL at 1733958059994 (+1 ms)Running coprocessor post-close hooks at 1733958059997 (+3 ms)Closed at 1733958059997 2024-12-11T23:00:59,999 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 80ba19fc77334646b38565c176ea8e92 2024-12-11T23:01:00,001 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=80ba19fc77334646b38565c176ea8e92, regionState=CLOSED 2024-12-11T23:01:00,001 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:01:00,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=880c0fb4b1e8ffad1f423d4e22137a95, regionState=CLOSED 2024-12-11T23:01:00,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:01:00,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:01:00,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-11T23:01:00,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 80ba19fc77334646b38565c176ea8e92, server=b762025f20c5,33867,1733957770938 in 161 msec 2024-12-11T23:01:00,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=80ba19fc77334646b38565c176ea8e92, UNASSIGN in 166 msec 2024-12-11T23:01:00,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-11T23:01:00,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 880c0fb4b1e8ffad1f423d4e22137a95, server=b762025f20c5,37563,1733957770731 in 163 msec 2024-12-11T23:01:00,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=190, resume processing ppid=189 2024-12-11T23:01:00,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=880c0fb4b1e8ffad1f423d4e22137a95, UNASSIGN in 167 msec 2024-12-11T23:01:00,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-11T23:01:00,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 170 msec 2024-12-11T23:01:00,009 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958060009"}]},"ts":"1733958060009"} 2024-12-11T23:01:00,010 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-11T23:01:00,010 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-11T23:01:00,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 179 msec 2024-12-11T23:01:00,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T23:01:00,155 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T23:01:00,156 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,157 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,158 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,160 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,161 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:01:00,161 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92 2024-12-11T23:01:00,162 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/recovered.edits] 2024-12-11T23:01:00,162 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/recovered.edits] 2024-12-11T23:01:00,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T23:01:00,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T23:01:00,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T23:01:00,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T23:01:00,182 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57 to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/cf/fa26a807a33f44f481ada99213722a57 2024-12-11T23:01:00,182 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/cf/4b922f8070b448f2a7611c5b6f559fc6 2024-12-11T23:01:00,185 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92/recovered.edits/9.seqid 2024-12-11T23:01:00,185 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95/recovered.edits/9.seqid 2024-12-11T23:01:00,185 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:01:00,185 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportExpiredSnapshot/80ba19fc77334646b38565c176ea8e92 2024-12-11T23:01:00,185 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-11T23:01:00,185 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-11T23:01:00,186 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, 
quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-11T23:01:00,188 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,188 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,188 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,189 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,189 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024121110c1f8202ed84fc2b1409c3a1f15f385_80ba19fc77334646b38565c176ea8e92 2024-12-11T23:01:00,190 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202412119132ae391ee44a8e85066d991a82098b_880c0fb4b1e8ffad1f423d4e22137a95 2024-12-11T23:01:00,190 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-11T23:01:00,192 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,194 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-11T23:01:00,196 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-11T23:01:00,197 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,197 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-11T23:01:00,197 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958060197"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:00,197 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958060197"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:00,199 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:01:00,199 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 880c0fb4b1e8ffad1f423d4e22137a95, NAME => 'testtb-testExportExpiredSnapshot,,1733958045925.880c0fb4b1e8ffad1f423d4e22137a95.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 80ba19fc77334646b38565c176ea8e92, NAME => 'testtb-testExportExpiredSnapshot,1,1733958045925.80ba19fc77334646b38565c176ea8e92.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:01:00,199 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-11T23:01:00,199 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958060199"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:00,200 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-11T23:01:00,201 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 45 msec 2024-12-11T23:01:00,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-11T23:01:00,294 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-11T23:01:00,295 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T23:01:00,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T23:01:00,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-11T23:01:00,303 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-11T23:01:00,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-11T23:01:00,305 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T23:01:00,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-11T23:01:00,322 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=798 (was 814), OpenFileDescriptor=769 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=648 (was 749), ProcessCount=15 (was 15), AvailableMemoryMB=8791 (was 4667) - AvailableMemoryMB LEAK? 
- 2024-12-11T23:01:00,322 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T23:01:00,336 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=798, OpenFileDescriptor=769, MaxFileDescriptor=1048576, SystemLoadAverage=648, ProcessCount=15, AvailableMemoryMB=8791 2024-12-11T23:01:00,336 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T23:01:00,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:01:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:00,339 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:01:00,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-11T23:01:00,340 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:01:00,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T23:01:00,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742271_1447 (size=448) 2024-12-11T23:01:00,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742271_1447 (size=448) 2024-12-11T23:01:00,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742271_1447 (size=448) 2024-12-11T23:01:00,347 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fc0fa65c1c04f6b4c4e3b7d344b787ce, NAME => 'testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:00,347 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 440b3bb9d89164ea53314f56180d9703, NAME => 'testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:00,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742272_1448 (size=73) 2024-12-11T23:01:00,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742273_1449 (size=73) 2024-12-11T23:01:00,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742272_1448 (size=73) 2024-12-11T23:01:00,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742273_1449 (size=73) 2024-12-11T23:01:00,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742273_1449 (size=73) 2024-12-11T23:01:00,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742272_1448 (size=73) 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing fc0fa65c1c04f6b4c4e3b7d344b787ce, disabling compactions & flushes 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 440b3bb9d89164ea53314f56180d9703, disabling compactions & flushes 2024-12-11T23:01:00,357 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,357 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. after waiting 0 ms 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. after waiting 0 ms 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:00,357 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,357 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 440b3bb9d89164ea53314f56180d9703: Waiting for close lock at 1733958060357Disabling compacts and flushes for region at 1733958060357Disabling writes for close at 1733958060357Writing region close event to WAL at 1733958060357Closed at 1733958060357 2024-12-11T23:01:00,357 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for fc0fa65c1c04f6b4c4e3b7d344b787ce: Waiting for close lock at 1733958060357Disabling compacts and flushes for region at 1733958060357Disabling writes for close at 1733958060357Writing region close event to WAL at 1733958060357Closed at 1733958060357 2024-12-11T23:01:00,358 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:01:00,358 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733958060358"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958060358"}]},"ts":"1733958060358"} 2024-12-11T23:01:00,358 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733958060358"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958060358"}]},"ts":"1733958060358"} 2024-12-11T23:01:00,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-11T23:01:00,360 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:01:00,361 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958060361"}]},"ts":"1733958060361"} 2024-12-11T23:01:00,362 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T23:01:00,362 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:01:00,363 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:01:00,363 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:01:00,363 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:01:00,363 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:01:00,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, ASSIGN}] 2024-12-11T23:01:00,364 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, ASSIGN 2024-12-11T23:01:00,364 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, ASSIGN 2024-12-11T23:01:00,364 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T23:01:00,364 INFO [PEWorker-1 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, ASSIGN; state=OFFLINE, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:01:00,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T23:01:00,515 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T23:01:00,515 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=fc0fa65c1c04f6b4c4e3b7d344b787ce, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:01:00,515 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=440b3bb9d89164ea53314f56180d9703, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:01:00,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, ASSIGN because future has completed 2024-12-11T23:01:00,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:01:00,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, ASSIGN because future has completed 2024-12-11T23:01:00,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:01:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T23:01:00,676 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,676 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
2024-12-11T23:01:00,676 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 440b3bb9d89164ea53314f56180d9703, NAME => 'testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:01:00,676 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => fc0fa65c1c04f6b4c4e3b7d344b787ce, NAME => 'testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:01:00,677 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. service=AccessControlService 2024-12-11T23:01:00,677 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. service=AccessControlService 2024-12-11T23:01:00,677 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:01:00,677 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 
{event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,678 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,680 INFO [StoreOpener-fc0fa65c1c04f6b4c4e3b7d344b787ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,680 INFO [StoreOpener-440b3bb9d89164ea53314f56180d9703-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,681 INFO [StoreOpener-fc0fa65c1c04f6b4c4e3b7d344b787ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fc0fa65c1c04f6b4c4e3b7d344b787ce columnFamilyName cf 2024-12-11T23:01:00,682 INFO [StoreOpener-440b3bb9d89164ea53314f56180d9703-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 440b3bb9d89164ea53314f56180d9703 columnFamilyName cf 2024-12-11T23:01:00,683 DEBUG [StoreOpener-fc0fa65c1c04f6b4c4e3b7d344b787ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:00,683 DEBUG [StoreOpener-440b3bb9d89164ea53314f56180d9703-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:00,683 INFO [StoreOpener-fc0fa65c1c04f6b4c4e3b7d344b787ce-1 {}] regionserver.HStore(327): Store=fc0fa65c1c04f6b4c4e3b7d344b787ce/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:01:00,683 INFO [StoreOpener-440b3bb9d89164ea53314f56180d9703-1 {}] regionserver.HStore(327): 
Store=440b3bb9d89164ea53314f56180d9703/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:01:00,683 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,683 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,684 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,684 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,684 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,684 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,685 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,685 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,685 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,685 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,686 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,686 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 
2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:01:00,688 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened fc0fa65c1c04f6b4c4e3b7d344b787ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67550860, jitterRate=0.006586253643035889}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:00,688 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 440b3bb9d89164ea53314f56180d9703; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63150001, jitterRate=-0.058991655707359314}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 440b3bb9d89164ea53314f56180d9703: Running coprocessor pre-open hook at 1733958060678Writing region info on filesystem at 1733958060678Initializing all the Stores at 1733958060679 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958060679Cleaning up temporary data from old regions at 1733958060685 (+6 ms)Running coprocessor post-open hooks at 1733958060688 (+3 ms)Region opened successfully at 1733958060688 2024-12-11T23:01:00,688 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for fc0fa65c1c04f6b4c4e3b7d344b787ce: Running coprocessor pre-open hook at 1733958060678Writing region info on filesystem at 1733958060678Initializing all the Stores at 1733958060679 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958060679Cleaning up temporary data from old regions at 1733958060685 (+6 ms)Running coprocessor post-open hooks at 1733958060688 (+3 ms)Region opened successfully at 1733958060688 
2024-12-11T23:01:00,689 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce., pid=198, masterSystemTime=1733958060669 2024-12-11T23:01:00,689 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703., pid=199, masterSystemTime=1733958060670 2024-12-11T23:01:00,690 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,690 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,691 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=fc0fa65c1c04f6b4c4e3b7d344b787ce, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:01:00,691 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:00,691 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
2024-12-11T23:01:00,692 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=440b3bb9d89164ea53314f56180d9703, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:01:00,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:01:00,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:01:00,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-11T23:01:00,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023 in 176 msec 2024-12-11T23:01:00,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-11T23:01:00,695 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938 in 176 msec 2024-12-11T23:01:00,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, ASSIGN in 331 msec 2024-12-11T23:01:00,697 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-11T23:01:00,697 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, ASSIGN in 332 msec 2024-12-11T23:01:00,697 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:01:00,698 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958060697"}]},"ts":"1733958060697"} 2024-12-11T23:01:00,699 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T23:01:00,700 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:01:00,700 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-11T23:01:00,702 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T23:01:00,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,805 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:00,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 467 msec 2024-12-11T23:01:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T23:01:00,965 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T23:01:00,965 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:00,968 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-11T23:01:00,968 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:00,968 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:01:00,969 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:00,974 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:00,979 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:00,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T23:01:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958060982 (current time:1733958060982). 
2024-12-11T23:01:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:01:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-11T23:01:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:01:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@551d4879, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:00,984 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:00,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:00,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:00,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@141e6e6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:00,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:00,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:00,986 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:00,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@446c7750, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:00,988 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:00,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:00,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40918, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:00,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:01:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:00,991 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T23:01:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6155829b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:00,993 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d94b593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:00,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:00,994 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:00,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@765ca541, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:00,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:00,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:00,997 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
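The ClusterIdFetcher, ConnectionRegistry and "fetched meta region location" entries above are the standard client connection bootstrap: each short-lived connection first asks the connection registry for the cluster id and then for the hbase:meta location before issuing any data RPC. A minimal client-side sketch of that same bootstrap, assuming default configuration; the table name is the one from this test and the empty row key is only an illustrative probe:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RegistryBootstrapSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection() resolves the cluster id through the connection
    // registry before the future completes (the "Got connection registry info"
    // entries in the log).
    CompletableFuture<AsyncConnection> connFuture =
        ConnectionFactory.createAsyncConnection(conf);
    try (AsyncConnection conn = connFuture.get()) {
      // Locating a user-table row forces the hbase:meta lookup that shows up
      // as "Start fetching meta region location from registry".
      HRegionLocation loc = conn
          .getRegionLocator(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .getRegionLocation(Bytes.toBytes(""))
          .get();
      System.out.println("first region is on " + loc.getServerName());
    }
  }
}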
2024-12-11T23:01:00,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:01:00,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:00,999 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:01,000 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 2024-12-11T23:01:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:01,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,000 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T23:01:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:01:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T23:01:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-11T23:01:01,002 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:01:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T23:01:01,003 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:01:01,005 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:01:01,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742274_1450 (size=185) 2024-12-11T23:01:01,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742274_1450 (size=185) 2024-12-11T23:01:01,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742274_1450 (size=185) 2024-12-11T23:01:01,013 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:01:01,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703}] 2024-12-11T23:01:01,013 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T23:01:01,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-11T23:01:01,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 440b3bb9d89164ea53314f56180d9703: 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for fc0fa65c1c04f6b4c4e3b7d344b787ce: 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:01,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:01,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:01:01,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:01:01,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742275_1451 (size=76) 2024-12-11T23:01:01,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742275_1451 (size=76) 2024-12-11T23:01:01,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742276_1452 (size=76) 2024-12-11T23:01:01,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742275_1451 (size=76) 2024-12-11T23:01:01,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742276_1452 (size=76) 2024-12-11T23:01:01,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:01,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742276_1452 (size=76) 2024-12-11T23:01:01,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-11T23:01:01,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
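The validation and per-region work above belongs to a flush-type snapshot request ({ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }). A minimal sketch of the client call that triggers such a request, using the snapshot and table names from the log and otherwise assuming a default connection:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // type=FLUSH makes the master run a SnapshotProcedure that asks each
      // region server to flush and snapshot its online regions, matching the
      // SnapshotRegionProcedure / SnapshotRegionCallable entries above.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testEmptyExportFileSystemState", table, SnapshotType.FLUSH));
      // The call blocks until the procedure reports SUCCESS; a client can also
      // poll with admin.isSnapshotFinished(...), which is what the repeated
      // "Checking to see if procedure is done" entries correspond to.
    }
  }
}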
2024-12-11T23:01:01,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-11T23:01:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-11T23:01:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-11T23:01:01,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,172 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,172 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,172 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce in 159 msec 2024-12-11T23:01:01,175 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-12-11T23:01:01,175 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:01:01,175 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703 in 159 msec 2024-12-11T23:01:01,175 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:01:01,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T23:01:01,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:01:01,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:01,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T23:01:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742277_1453 (size=68) 2024-12-11T23:01:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742277_1453 (size=68) 2024-12-11T23:01:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742277_1453 (size=68) 2024-12-11T23:01:01,183 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:01:01,183 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,184 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742278_1454 (size=673) 2024-12-11T23:01:01,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742278_1454 (size=673) 2024-12-11T23:01:01,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742278_1454 (size=673) 2024-12-11T23:01:01,193 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:01:01,196 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:01:01,197 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,198 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:01:01,198 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-11T23:01:01,199 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 197 msec 2024-12-11T23:01:01,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T23:01:01,316 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T23:01:01,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:01:01,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33867 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:01:01,329 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:01,331 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-11T23:01:01,331 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 
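The "writing data to region ... with WAL disabled" warnings above come from puts issued with WAL durability turned off. A small sketch of a put that produces that warning; the row key and value are placeholders, while family 'cf' and qualifier 'q' are the ones seen in the flush entries later in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                 // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
          Bytes.toBytes("value"));                               // placeholder value
      // SKIP_WAL is what triggers the "Data may be lost in the event of a
      // crash" warning on the region server.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}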
2024-12-11T23:01:01,331 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:01:01,332 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:01,335 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:01,339 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T23:01:01,342 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T23:01:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958061342 (current time:1733958061342). 2024-12-11T23:01:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:01:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-11T23:01:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:01:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@264daacb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:01,344 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3d45e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,345 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:01,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5efc04d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:01,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:01,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:01,348 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:01,349 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,349 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@783f1ff2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:01,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:01,350 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:01,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:01,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:01,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3852791b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:01,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:01,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,351 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53928, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:01,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6d0b63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:01,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:01,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:01,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:01,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:01,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:01:01,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:01,356 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34726, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:01,357 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:01:01,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:01,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:01,358 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T23:01:01,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
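The "Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]" entry above is PermissionStorage reading the hbase:acl table so that the table's permissions can be copied into the snapshot description. A sketch of the kind of grant that creates such an entry; it assumes the AccessController coprocessor is enabled and grants at table scope (null family and qualifier):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantRwxcaSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // READ, WRITE, EXEC, CREATE, ADMIN print as "RWXCA" in PermissionStorage.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}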
2024-12-11T23:01:01,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T23:01:01,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-11T23:01:01,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T23:01:01,360 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:01:01,361 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:01:01,363 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:01:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742279_1455 (size=180) 2024-12-11T23:01:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742279_1455 (size=180) 2024-12-11T23:01:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742279_1455 (size=180) 2024-12-11T23:01:01,372 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:01:01,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703}] 2024-12-11T23:01:01,372 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,372 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T23:01:01,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-11T23:01:01,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-11T23:01:01,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:01,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:01,525 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing fc0fa65c1c04f6b4c4e3b7d344b787ce 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T23:01:01,525 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 440b3bb9d89164ea53314f56180d9703 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T23:01:01,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce is 71, key is 0298c334a02e73fd554c597268044ae2/cf:q/1733958061326/Put/seqid=0 2024-12-11T23:01:01,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 is 71, key is 104189ff795663d3c2850a687dc569bd/cf:q/1733958061328/Put/seqid=0 2024-12-11T23:01:01,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742280_1456 (size=5241) 2024-12-11T23:01:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742281_1457 (size=8031) 2024-12-11T23:01:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742280_1456 (size=5241) 2024-12-11T23:01:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742280_1456 (size=5241) 2024-12-11T23:01:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742281_1457 (size=8031) 
2024-12-11T23:01:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742281_1457 (size=8031) 2024-12-11T23:01:01,552 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:01,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:01,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/.tmp/cf/0482b7f4c78c4e018fa0c89f7b3c36e4, store: [table=testtb-testEmptyExportFileSystemState family=cf region=440b3bb9d89164ea53314f56180d9703] 2024-12-11T23:01:01,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/.tmp/cf/e063556b08cc4774941a49616c03cfa1, store: [table=testtb-testEmptyExportFileSystemState family=cf region=fc0fa65c1c04f6b4c4e3b7d344b787ce] 2024-12-11T23:01:01,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/.tmp/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 is 214, key is 11e4bf75f7902e8f0849240157b3f37e0/cf:q/1733958061328/Put/seqid=0 
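The DefaultMobStoreFlusher and mobdir entries above indicate that family 'cf' is MOB-enabled: flushed cells are written to a mob file under the mobdir tree while the regular store file keeps reference cells. A sketch of declaring such a family at table-creation time; the zero MOB threshold is only an illustrative assumption, not a value read from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // route flushes through the MOB store flusher
              .setMobThreshold(0L)   // assumption: treat every cell as a MOB cell
              .build())
          .build());
    }
  }
}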
2024-12-11T23:01:01,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/.tmp/cf/e063556b08cc4774941a49616c03cfa1 is 214, key is 03d6f39663330d56235a87ecb2032b186/cf:q/1733958061326/Put/seqid=0 2024-12-11T23:01:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742283_1459 (size=6356) 2024-12-11T23:01:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742283_1459 (size=6356) 2024-12-11T23:01:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742283_1459 (size=6356) 2024-12-11T23:01:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742282_1458 (size=14817) 2024-12-11T23:01:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742282_1458 (size=14817) 2024-12-11T23:01:01,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/.tmp/cf/e063556b08cc4774941a49616c03cfa1 2024-12-11T23:01:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742282_1458 (size=14817) 2024-12-11T23:01:01,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/.tmp/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 2024-12-11T23:01:01,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/.tmp/cf/e063556b08cc4774941a49616c03cfa1 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1 2024-12-11T23:01:01,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/.tmp/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 as 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 2024-12-11T23:01:01,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4, entries=45, sequenceid=6, filesize=14.5 K 2024-12-11T23:01:01,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1, entries=5, sequenceid=6, filesize=6.2 K 2024-12-11T23:01:01,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for fc0fa65c1c04f6b4c4e3b7d344b787ce in 46ms, sequenceid=6, compaction requested=false 2024-12-11T23:01:01,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 440b3bb9d89164ea53314f56180d9703 in 46ms, sequenceid=6, compaction requested=false 2024-12-11T23:01:01,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-11T23:01:01,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 440b3bb9d89164ea53314f56180d9703: 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for fc0fa65c1c04f6b4c4e3b7d344b787ce: 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4] hfiles 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1] hfiles 2024-12-11T23:01:01,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742285_1461 (size=115) 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742284_1460 (size=115) 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742284_1460 (size=115) 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742285_1461 (size=115) 2024-12-11T23:01:01,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 
2024-12-11T23:01:01,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742284_1460 (size=115) 2024-12-11T23:01:01,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742285_1461 (size=115) 2024-12-11T23:01:01,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:01,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-11T23:01:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-11T23:01:01,595 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-11T23:01:01,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,595 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,595 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 440b3bb9d89164ea53314f56180d9703 in 223 msec 2024-12-11T23:01:01,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=204, resume processing ppid=203 2024-12-11T23:01:01,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce in 223 msec 2024-12-11T23:01:01,597 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:01:01,598 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 
2024-12-11T23:01:01,598 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T23:01:01,598 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:01:01,598 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:01,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce] hfiles 2024-12-11T23:01:01,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:01,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742286_1462 (size=299) 2024-12-11T23:01:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742286_1462 (size=299) 2024-12-11T23:01:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742286_1462 (size=299) 2024-12-11T23:01:01,605 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:01:01,605 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,606 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742287_1463 (size=983) 2024-12-11T23:01:01,612 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742287_1463 (size=983) 2024-12-11T23:01:01,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742287_1463 (size=983) 2024-12-11T23:01:01,630 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:01:01,635 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:01:01,635 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,637 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:01:01,637 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-11T23:01:01,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 279 msec 2024-12-11T23:01:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T23:01:01,675 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T23:01:01,675 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675 2024-12-11T23:01:01,675 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:01,699 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:01,699 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,701 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:01:01,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:01,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742289_1465 (size=673) 2024-12-11T23:01:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742288_1464 (size=185) 2024-12-11T23:01:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742289_1465 (size=673) 2024-12-11T23:01:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742288_1464 (size=185) 2024-12-11T23:01:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742288_1464 (size=185) 2024-12-11T23:01:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742289_1465 (size=673) 2024-12-11T23:01:01,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:01,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:01,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-16553402791148759895.jar 2024-12-11T23:01:02,534 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-13807929425218372596.jar 2024-12-11T23:01:02,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:02,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:01:02,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:01:02,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:01:02,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:01:02,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:01:02,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:01:02,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:02,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:02,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:02,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:02,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:02,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:02,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:02,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742290_1466 (size=131440) 2024-12-11T23:01:02,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742290_1466 (size=131440) 2024-12-11T23:01:02,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742290_1466 (size=131440) 2024-12-11T23:01:02,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742291_1467 (size=4188619) 2024-12-11T23:01:02,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742291_1467 (size=4188619) 2024-12-11T23:01:02,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742291_1467 (size=4188619) 2024-12-11T23:01:02,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742292_1468 (size=1323991) 2024-12-11T23:01:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742292_1468 (size=1323991) 2024-12-11T23:01:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742292_1468 (size=1323991) 2024-12-11T23:01:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742293_1469 (size=903541) 2024-12-11T23:01:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742293_1469 (size=903541) 2024-12-11T23:01:02,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742293_1469 (size=903541) 2024-12-11T23:01:02,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742294_1470 (size=8360360) 2024-12-11T23:01:02,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added 
to blk_1073742294_1470 (size=8360360) 2024-12-11T23:01:02,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742294_1470 (size=8360360) 2024-12-11T23:01:02,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742295_1471 (size=6426222) 2024-12-11T23:01:02,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742295_1471 (size=6426222) 2024-12-11T23:01:02,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742295_1471 (size=6426222) 2024-12-11T23:01:02,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742296_1472 (size=1877034) 2024-12-11T23:01:02,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742296_1472 (size=1877034) 2024-12-11T23:01:02,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742296_1472 (size=1877034) 2024-12-11T23:01:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742297_1473 (size=77835) 2024-12-11T23:01:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742297_1473 (size=77835) 2024-12-11T23:01:02,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742297_1473 (size=77835) 2024-12-11T23:01:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742298_1474 (size=30949) 2024-12-11T23:01:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742298_1474 (size=30949) 2024-12-11T23:01:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742298_1474 (size=30949) 2024-12-11T23:01:02,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742299_1475 (size=1597213) 2024-12-11T23:01:02,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742299_1475 (size=1597213) 2024-12-11T23:01:02,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742299_1475 (size=1597213) 2024-12-11T23:01:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742300_1476 (size=4695811) 2024-12-11T23:01:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742300_1476 (size=4695811) 2024-12-11T23:01:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742300_1476 (size=4695811) 2024-12-11T23:01:02,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36751 is added to blk_1073742301_1477 (size=232957) 2024-12-11T23:01:02,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742301_1477 (size=232957) 2024-12-11T23:01:02,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742301_1477 (size=232957) 2024-12-11T23:01:02,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742302_1478 (size=127628) 2024-12-11T23:01:02,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742302_1478 (size=127628) 2024-12-11T23:01:02,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742302_1478 (size=127628) 2024-12-11T23:01:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742303_1479 (size=20406) 2024-12-11T23:01:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742303_1479 (size=20406) 2024-12-11T23:01:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742303_1479 (size=20406) 2024-12-11T23:01:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742304_1480 (size=443172) 2024-12-11T23:01:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742304_1480 (size=443172) 2024-12-11T23:01:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742304_1480 (size=443172) 2024-12-11T23:01:02,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742305_1481 (size=5175431) 2024-12-11T23:01:02,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742305_1481 (size=5175431) 2024-12-11T23:01:02,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742305_1481 (size=5175431) 2024-12-11T23:01:02,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742306_1482 (size=217634) 2024-12-11T23:01:02,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742306_1482 (size=217634) 2024-12-11T23:01:02,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742306_1482 (size=217634) 2024-12-11T23:01:02,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742307_1483 (size=1832290) 2024-12-11T23:01:02,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742307_1483 (size=1832290) 2024-12-11T23:01:02,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41607 is added to blk_1073742307_1483 (size=1832290) 2024-12-11T23:01:02,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742308_1484 (size=322274) 2024-12-11T23:01:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742308_1484 (size=322274) 2024-12-11T23:01:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742308_1484 (size=322274) 2024-12-11T23:01:02,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742309_1485 (size=503880) 2024-12-11T23:01:02,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742309_1485 (size=503880) 2024-12-11T23:01:02,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742309_1485 (size=503880) 2024-12-11T23:01:02,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742310_1486 (size=29229) 2024-12-11T23:01:02,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742310_1486 (size=29229) 2024-12-11T23:01:02,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742310_1486 (size=29229) 2024-12-11T23:01:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742311_1487 (size=24096) 2024-12-11T23:01:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742311_1487 (size=24096) 2024-12-11T23:01:02,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742311_1487 (size=24096) 2024-12-11T23:01:02,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742312_1488 (size=111872) 2024-12-11T23:01:02,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742312_1488 (size=111872) 2024-12-11T23:01:02,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742312_1488 (size=111872) 2024-12-11T23:01:02,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742313_1489 (size=45609) 2024-12-11T23:01:02,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742313_1489 (size=45609) 2024-12-11T23:01:02,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742313_1489 (size=45609) 2024-12-11T23:01:02,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742314_1490 (size=136454) 2024-12-11T23:01:02,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36751 is added to blk_1073742314_1490 (size=136454) 2024-12-11T23:01:02,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742314_1490 (size=136454) 2024-12-11T23:01:02,876 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T23:01:02,877 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-11T23:01:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742315_1491 (size=7) 2024-12-11T23:01:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742315_1491 (size=7) 2024-12-11T23:01:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742315_1491 (size=7) 2024-12-11T23:01:02,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742316_1492 (size=10) 2024-12-11T23:01:02,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742316_1492 (size=10) 2024-12-11T23:01:02,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742316_1492 (size=10) 2024-12-11T23:01:02,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742317_1493 (size=303903) 2024-12-11T23:01:02,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742317_1493 (size=303903) 2024-12-11T23:01:02,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742317_1493 (size=303903) 2024-12-11T23:01:02,911 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:01:02,911 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:01:03,303 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0008_000001 (auth:SIMPLE) from 127.0.0.1:57114 2024-12-11T23:01:05,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:01:08,685 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T23:01:08,728 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0008_000001 (auth:SIMPLE) from 127.0.0.1:43430 2024-12-11T23:01:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742318_1494 (size=349577) 2024-12-11T23:01:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742318_1494 (size=349577) 2024-12-11T23:01:08,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742318_1494 (size=349577) 2024-12-11T23:01:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742319_1495 (size=8568) 2024-12-11T23:01:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742319_1495 (size=8568) 2024-12-11T23:01:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742319_1495 (size=8568) 2024-12-11T23:01:09,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742320_1496 (size=460) 2024-12-11T23:01:09,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742320_1496 (size=460) 2024-12-11T23:01:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742320_1496 (size=460) 2024-12-11T23:01:09,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742321_1497 (size=8568) 2024-12-11T23:01:09,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742321_1497 (size=8568) 2024-12-11T23:01:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742321_1497 (size=8568) 2024-12-11T23:01:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742322_1498 (size=349577) 2024-12-11T23:01:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742322_1498 (size=349577) 2024-12-11T23:01:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742322_1498 (size=349577) 2024-12-11T23:01:10,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T23:01:10,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T23:01:10,896 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, 
cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T23:01:10,988 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T23:01:11,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:01:11,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T23:01:11,034 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:11,034 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:01:11,035 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:01:11,035 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:11,035 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T23:01:11,035 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T23:01:11,035 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:11,036 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T23:01:11,036 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958061675/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T23:01:11,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,043 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T23:01:11,043 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958071043"}]},"ts":"1733958071043"} 2024-12-11T23:01:11,044 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T23:01:11,044 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-11T23:01:11,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-11T23:01:11,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, UNASSIGN}] 2024-12-11T23:01:11,047 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, UNASSIGN 2024-12-11T23:01:11,047 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, UNASSIGN 2024-12-11T23:01:11,047 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=440b3bb9d89164ea53314f56180d9703, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:01:11,047 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=fc0fa65c1c04f6b4c4e3b7d344b787ce, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:01:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, UNASSIGN because future has completed 2024-12-11T23:01:11,049 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:01:11,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:01:11,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; 
TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, UNASSIGN because future has completed 2024-12-11T23:01:11,049 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:01:11,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:01:11,067 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T23:01:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T23:01:11,201 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:11,201 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing fc0fa65c1c04f6b4c4e3b7d344b787ce, disabling compactions & flushes 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 440b3bb9d89164ea53314f56180d9703, disabling compactions & flushes 2024-12-11T23:01:11,201 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:11,201 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:11,201 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
2024-12-11T23:01:11,202 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. after waiting 0 ms 2024-12-11T23:01:11,202 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. after waiting 0 ms 2024-12-11T23:01:11,202 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:11,202 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 2024-12-11T23:01:11,207 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:01:11,207 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:01:11,208 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:01:11,208 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:01:11,208 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce. 2024-12-11T23:01:11,208 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703. 
2024-12-11T23:01:11,208 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 440b3bb9d89164ea53314f56180d9703: Waiting for close lock at 1733958071201Running coprocessor pre-close hooks at 1733958071201Disabling compacts and flushes for region at 1733958071201Disabling writes for close at 1733958071202 (+1 ms)Writing region close event to WAL at 1733958071202Running coprocessor post-close hooks at 1733958071208 (+6 ms)Closed at 1733958071208 2024-12-11T23:01:11,208 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for fc0fa65c1c04f6b4c4e3b7d344b787ce: Waiting for close lock at 1733958071201Running coprocessor pre-close hooks at 1733958071201Disabling compacts and flushes for region at 1733958071201Disabling writes for close at 1733958071202 (+1 ms)Writing region close event to WAL at 1733958071203 (+1 ms)Running coprocessor post-close hooks at 1733958071208 (+5 ms)Closed at 1733958071208 2024-12-11T23:01:11,210 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:11,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=440b3bb9d89164ea53314f56180d9703, regionState=CLOSED 2024-12-11T23:01:11,211 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:11,212 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=fc0fa65c1c04f6b4c4e3b7d344b787ce, regionState=CLOSED 2024-12-11T23:01:11,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:01:11,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:01:11,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-11T23:01:11,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 440b3bb9d89164ea53314f56180d9703, server=b762025f20c5,33867,1733957770938 in 165 msec 2024-12-11T23:01:11,216 DEBUG [master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=20, reuseRatio=66.67% 2024-12-11T23:01:11,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-12-11T23:01:11,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=440b3bb9d89164ea53314f56180d9703, UNASSIGN in 170 msec 2024-12-11T23:01:11,217 
INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure fc0fa65c1c04f6b4c4e3b7d344b787ce, server=b762025f20c5,41635,1733957771023 in 166 msec 2024-12-11T23:01:11,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-12-11T23:01:11,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=fc0fa65c1c04f6b4c4e3b7d344b787ce, UNASSIGN in 171 msec 2024-12-11T23:01:11,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-11T23:01:11,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 174 msec 2024-12-11T23:01:11,221 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958071220"}]},"ts":"1733958071220"} 2024-12-11T23:01:11,222 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T23:01:11,222 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-11T23:01:11,223 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 182 msec 2024-12-11T23:01:11,224 DEBUG [master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-11T23:01:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T23:01:11,356 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T23:01:11,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,359 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,360 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,364 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,366 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:11,366 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:11,368 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/recovered.edits] 2024-12-11T23:01:11,368 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/recovered.edits] 2024-12-11T23:01:11,657 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/cf/e063556b08cc4774941a49616c03cfa1 2024-12-11T23:01:11,657 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/cf/0482b7f4c78c4e018fa0c89f7b3c36e4 2024-12-11T23:01:11,663 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce/recovered.edits/9.seqid 2024-12-11T23:01:11,663 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703/recovered.edits/9.seqid 2024-12-11T23:01:11,663 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:11,663 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testEmptyExportFileSystemState/440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:11,663 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-11T23:01:11,664 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-11T23:01:11,665 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-11T23:01:11,702 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211f5a65568626544f59404e71377275ba6_440b3bb9d89164ea53314f56180d9703 2024-12-11T23:01:11,702 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211e13fef3e110d4a3b9c36ddee3a31acff_fc0fa65c1c04f6b4c4e3b7d344b787ce 2024-12-11T23:01:11,703 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-11T23:01:11,704 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,706 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of 
testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-11T23:01:11,708 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-11T23:01:11,709 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,709 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-11T23:01:11,709 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958071709"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:11,709 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958071709"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:11,710 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:01:11,710 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => fc0fa65c1c04f6b4c4e3b7d344b787ce, NAME => 'testtb-testEmptyExportFileSystemState,,1733958060337.fc0fa65c1c04f6b4c4e3b7d344b787ce.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 440b3bb9d89164ea53314f56180d9703, NAME => 'testtb-testEmptyExportFileSystemState,1,1733958060337.440b3bb9d89164ea53314f56180d9703.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:01:11,710 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
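[Editor's illustrative sketch, not part of the captured log] The entries around this point trace the DeleteTableProcedure: region and MOB directories are moved to the archive, the rows are removed from hbase:meta, and the table descriptor is dropped; a little further below, the two snapshots are deleted as well. A hedged sketch of the corresponding client calls follows, reusing the connection assumptions from the earlier sketch; the class name is hypothetical and the table/snapshot names are taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The table must already be disabled; the procedure archives region files
      // rather than deleting them in place, which matches the HFileArchiver entries above.
      admin.deleteTable(tn);
      // Snapshot cleanup, as recorded further below in the log.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}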
2024-12-11T23:01:11,710 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958071710"}]},"ts":"9223372036854775807"} 2024-12-11T23:01:11,712 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-11T23:01:11,712 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 356 msec 2024-12-11T23:01:11,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,743 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T23:01:11,743 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T23:01:11,743 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T23:01:11,743 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:11,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-11T23:01:11,771 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-11T23:01:11,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:11,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:11,771 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T23:01:11,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:11,772 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:11,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T23:01:11,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:11,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T23:01:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-11T23:01:11,800 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=811 (was 798) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:47988 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_41542083_1 at /127.0.0.1:36042 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:44503 from appattempt_1733957780200_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:36064 [Waiting for 
operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_41542083_1 at /127.0.0.1:47964 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7450 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 138766) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native 
Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45323 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:57204 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45323 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44503 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=797 (was 769) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=618 (was 648), ProcessCount=18 (was 15) - ProcessCount LEAK? -, AvailableMemoryMB=8299 (was 8791) 2024-12-11T23:01:11,800 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-11T23:01:11,826 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=811, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=618, ProcessCount=18, AvailableMemoryMB=8297 2024-12-11T23:01:11,826 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-11T23:01:11,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:01:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:01:11,829 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:01:11,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-11T23:01:11,830 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:01:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T23:01:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742323_1499 (size=440) 2024-12-11T23:01:11,835 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742323_1499 (size=440) 2024-12-11T23:01:11,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742323_1499 (size=440) 2024-12-11T23:01:11,837 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 18f6027bec6e6407f29328b479aa7efc, NAME => 'testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:11,837 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b8cf674e6e4898a7bd768ade4bf1a10a, NAME => 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742325_1501 (size=65) 2024-12-11T23:01:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742325_1501 (size=65) 2024-12-11T23:01:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742325_1501 (size=65) 2024-12-11T23:01:11,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742324_1500 (size=65) 2024-12-11T23:01:11,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742324_1500 (size=65) 2024-12-11T23:01:11,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742324_1500 (size=65) 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:11,846 DEBUG 
[RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 18f6027bec6e6407f29328b479aa7efc, disabling compactions & flushes 2024-12-11T23:01:11,846 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing b8cf674e6e4898a7bd768ade4bf1a10a, disabling compactions & flushes 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. after waiting 0 ms 2024-12-11T23:01:11,846 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:11,846 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. after waiting 0 ms 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 18f6027bec6e6407f29328b479aa7efc: Waiting for close lock at 1733958071846Disabling compacts and flushes for region at 1733958071846Disabling writes for close at 1733958071846Writing region close event to WAL at 1733958071846Closed at 1733958071846 2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:11,846 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
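[Editor's illustrative sketch, not part of the captured log] The CreateTableProcedure entries here build testtb-testExportWithChecksum with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at '1'. Below is a minimal sketch of creating a table with that shape through the standard HBase 2.x builder API; the class name is hypothetical and table attributes beyond those listed are left at defaults.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell value is written as a MOB file
        .setMaxVersions(1)     // VERSIONS => '1'
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();
    // One split point at '1' yields the two regions seen above: ('', '1') and ('1', '').
    byte[][] splitKeys = { Bytes.toBytes("1") };
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the CreateTableProcedure has written the FS layout,
      // added the regions to hbase:meta, and assigned them.
      admin.createTable(td, splitKeys);
    }
  }
}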
2024-12-11T23:01:11,846 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for b8cf674e6e4898a7bd768ade4bf1a10a: Waiting for close lock at 1733958071846Disabling compacts and flushes for region at 1733958071846Disabling writes for close at 1733958071846Writing region close event to WAL at 1733958071846Closed at 1733958071846 2024-12-11T23:01:11,847 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:01:11,848 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733958071847"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958071847"}]},"ts":"1733958071847"} 2024-12-11T23:01:11,848 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733958071847"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958071847"}]},"ts":"1733958071847"} 2024-12-11T23:01:11,850 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T23:01:11,851 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:01:11,851 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958071851"}]},"ts":"1733958071851"} 2024-12-11T23:01:11,853 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-11T23:01:11,853 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:01:11,854 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:01:11,854 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:01:11,854 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:01:11,854 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:01:11,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, ASSIGN}] 2024-12-11T23:01:11,855 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, ASSIGN 2024-12-11T23:01:11,855 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, ASSIGN 2024-12-11T23:01:11,856 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T23:01:11,856 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T23:01:11,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T23:01:12,006 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
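[Editor's illustrative sketch, not part of the captured log] The balancer and TransitRegionStateProcedure ASSIGN entries around this point decide where the two new regions land. If you want to observe the resulting placement from the client side, a small sketch using the standard RegionLocator API is given below; it is only one way to inspect assignments, and the class name is hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowAssignmentsSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints each region's encoded name and the region server it was assigned to,
        // i.e. the outcome of the OPENING/OPEN transitions recorded in the log.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}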
2024-12-11T23:01:12,006 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:01:12,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=18f6027bec6e6407f29328b479aa7efc, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:01:12,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, ASSIGN because future has completed 2024-12-11T23:01:12,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:01:12,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, ASSIGN because future has completed 2024-12-11T23:01:12,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:01:12,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T23:01:12,164 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:12,164 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:12,164 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 18f6027bec6e6407f29328b479aa7efc, NAME => 'testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:01:12,164 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => b8cf674e6e4898a7bd768ade4bf1a10a, NAME => 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. service=AccessControlService 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
service=AccessControlService 2024-12-11T23:01:12,165 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:01:12,165 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,165 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,166 INFO [StoreOpener-18f6027bec6e6407f29328b479aa7efc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,166 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,167 INFO [StoreOpener-18f6027bec6e6407f29328b479aa7efc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18f6027bec6e6407f29328b479aa7efc columnFamilyName cf 2024-12-11T23:01:12,167 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b8cf674e6e4898a7bd768ade4bf1a10a columnFamilyName cf 2024-12-11T23:01:12,168 DEBUG [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:12,168 DEBUG [StoreOpener-18f6027bec6e6407f29328b479aa7efc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:12,168 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] regionserver.HStore(327): Store=b8cf674e6e4898a7bd768ade4bf1a10a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:01:12,169 INFO [StoreOpener-18f6027bec6e6407f29328b479aa7efc-1 {}] regionserver.HStore(327): Store=18f6027bec6e6407f29328b479aa7efc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:01:12,169 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,169 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,169 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,170 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,170 DEBUG 
[RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,170 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,170 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,171 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,171 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,172 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,172 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:01:12,173 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened b8cf674e6e4898a7bd768ade4bf1a10a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67323609, jitterRate=0.003199949860572815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:01:12,173 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,174 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for b8cf674e6e4898a7bd768ade4bf1a10a: Running coprocessor pre-open hook at 1733958072165Writing region info on filesystem at 1733958072165Initializing all the Stores at 1733958072166 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958072166Cleaning up temporary data from old regions at 1733958072170 (+4 ms)Running coprocessor post-open hooks at 
1733958072173 (+3 ms)Region opened successfully at 1733958072174 (+1 ms) 2024-12-11T23:01:12,175 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,175 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a., pid=216, masterSystemTime=1733958072160 2024-12-11T23:01:12,177 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:12,177 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:12,178 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:01:12,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:01:12,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-11T23:01:12,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731 in 173 msec 2024-12-11T23:01:12,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, ASSIGN in 328 msec 2024-12-11T23:01:12,188 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:01:12,189 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 18f6027bec6e6407f29328b479aa7efc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68669412, jitterRate=0.023253977298736572}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:01:12,189 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,189 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 18f6027bec6e6407f29328b479aa7efc: Running coprocessor pre-open hook at 1733958072165Writing region info on filesystem 
at 1733958072165Initializing all the Stores at 1733958072166 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958072166Cleaning up temporary data from old regions at 1733958072172 (+6 ms)Running coprocessor post-open hooks at 1733958072189 (+17 ms)Region opened successfully at 1733958072189 2024-12-11T23:01:12,190 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc., pid=217, masterSystemTime=1733958072161 2024-12-11T23:01:12,196 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:12,196 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:12,197 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=18f6027bec6e6407f29328b479aa7efc, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:01:12,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:01:12,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-11T23:01:12,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023 in 192 msec 2024-12-11T23:01:12,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-11T23:01:12,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, ASSIGN in 349 msec 2024-12-11T23:01:12,206 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:01:12,207 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958072206"}]},"ts":"1733958072206"} 2024-12-11T23:01:12,210 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-11T23:01:12,216 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=213, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:01:12,216 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-11T23:01:12,220 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T23:01:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:12,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:12,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:01:12,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 435 msec 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T23:01:12,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T23:01:12,456 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T23:01:12,456 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:12,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-11T23:01:12,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:12,462 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:01:12,464 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:12,469 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:12,475 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:12,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T23:01:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958072478 (current time:1733958072478). 
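The region-open journal above prints the effective column family descriptor for 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW'), and the CreateTableProcedure for testtb-testExportWithChecksum has just finished with two regions split at row '1'. A minimal client-side sketch of creating an equivalent table follows; the configuration setup and exact builder calls are assumptions reconstructed from the descriptor printed in the log, not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateChecksumTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // Column family matching the descriptor in the region-open journal:
          // one version, ROW bloom filter, MOB enabled with threshold 0.
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setMobEnabled(true)
              .setMobThreshold(0L);
          // Split key '1' yields the two regions seen above: ('', '1') and ('1', '').
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(cf.build())
              .build(), splits);
        }
      }
    }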
2024-12-11T23:01:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:01:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T23:01:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:01:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3a8997, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:12,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:12,479 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:12,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:12,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:12,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b501710, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:12,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:12,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,481 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:12,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@694592d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:12,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:12,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:12,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:12,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:01:12,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:12,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,486 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
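The master has just validated a snapshot request of type FLUSH for emptySnaptb0-testExportWithChecksum; the short-lived connection and the isSecurityAvailable frame in the call stack above come from SnapshotDescriptionUtils.validate. On the client side such a request is normally issued through the Admin API; the sketch below is a minimal assumed equivalent, not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Matches the request logged above: ss=emptySnaptb0-testExportWithChecksum,
          // table=testtb-testExportWithChecksum, type=FLUSH (ttl left at its default of 0).
          admin.snapshot("emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"),
              SnapshotType.FLUSH);
        }
      }
    }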
2024-12-11T23:01:12,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f8893a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:12,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:12,488 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:12,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:12,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:12,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c6aab83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:12,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:12,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,489 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:12,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@760c62aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:12,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:12,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:12,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:12,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
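Before the snapshot description is stored, the master reads the table's ACL entries (the "Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]" lines) so they can be carried along with the snapshot. A hedged sketch of inspecting those same permissions from a client follows; AccessControlClient is used purely as an illustration here and is not necessarily how the test reads them.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Lists entries stored in hbase:acl for the table; the log above shows a
          // single entry, "jenkins: RWXCA", written by the AccessController coprocessor.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
          for (UserPermission p : perms) {
            System.out.println(p);
          }
        }
      }
    }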
2024-12-11T23:01:12,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:01:12,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:12,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59184, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:12,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:01:12,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:12,498 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:12,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T23:01:12,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:01:12,499 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:12,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T23:01:12,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-11T23:01:12,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T23:01:12,502 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:01:12,503 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:01:12,505 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:01:12,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742326_1502 (size=161) 2024-12-11T23:01:12,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742326_1502 (size=161) 2024-12-11T23:01:12,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742326_1502 (size=161) 2024-12-11T23:01:12,528 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:01:12,528 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc}] 2024-12-11T23:01:12,529 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,530 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T23:01:12,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-11T23:01:12,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for b8cf674e6e4898a7bd768ade4bf1a10a: 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 
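The repeated "Checking to see if procedure is done pid=218" calls above are the client polling the master while SnapshotProcedure 218 walks through its states (SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, ...). With the asynchronous Admin API that polling pattern looks roughly like the sketch below; this is an assumed illustration of the pattern, not the code that produced these log lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class PollSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          SnapshotDescription snap = new SnapshotDescription(
              "emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"),
              SnapshotType.FLUSH);
          admin.snapshotAsync(snap);                 // submits the SnapshotProcedure to the master
          while (!admin.isSnapshotFinished(snap)) {  // client-side "is procedure done" poll
            Thread.sleep(200);
          }
        }
      }
    }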
2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 18f6027bec6e6407f29328b479aa7efc: 2024-12-11T23:01:12,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T23:01:12,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T23:01:12,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:12,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:01:12,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742327_1503 (size=68) 2024-12-11T23:01:12,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742328_1504 (size=68) 2024-12-11T23:01:12,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742327_1503 (size=68) 2024-12-11T23:01:12,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742328_1504 (size=68) 2024-12-11T23:01:12,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742328_1504 (size=68) 2024-12-11T23:01:12,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742327_1503 (size=68) 2024-12-11T23:01:12,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:12,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-11T23:01:12,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
2024-12-11T23:01:12,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-11T23:01:12,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-11T23:01:12,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,696 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:12,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-11T23:01:12,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,696 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:12,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a in 169 msec 2024-12-11T23:01:12,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-11T23:01:12,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:01:12,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc in 169 msec 2024-12-11T23:01:12,700 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:01:12,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T23:01:12,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:01:12,701 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:12,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T23:01:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742329_1505 (size=60) 2024-12-11T23:01:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742329_1505 (size=60) 2024-12-11T23:01:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742329_1505 (size=60) 2024-12-11T23:01:12,710 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:01:12,710 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-11T23:01:12,711 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-11T23:01:12,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742330_1506 (size=641) 2024-12-11T23:01:12,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742330_1506 (size=641) 2024-12-11T23:01:12,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742330_1506 (size=641) 2024-12-11T23:01:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T23:01:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T23:01:13,142 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:01:13,163 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:01:13,163 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-11T23:01:13,165 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:01:13,166 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-11T23:01:13,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 667 msec 2024-12-11T23:01:13,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T23:01:13,636 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T23:01:13,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:01:13,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:01:13,647 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:13,650 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-11T23:01:13,650 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
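The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" entries above are HRegion warning that the incoming mutations asked to skip the write-ahead log. A client produces that code path by setting the put durability, roughly as sketched below; the row key, qualifier, and value are placeholders, not the test's actual data.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                  // placeholder row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                     // triggers the warning logged above
          table.put(put);
        }
      }
    }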
2024-12-11T23:01:13,650 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:01:13,652 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:13,663 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:13,672 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T23:01:13,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T23:01:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958073676 (current time:1733958073676). 2024-12-11T23:01:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:01:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T23:01:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:01:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2591eb11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:13,680 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:13,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:13,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:13,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57091688, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T23:01:13,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:13,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:13,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,682 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:13,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52a56049, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:13,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:13,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
2024-12-11T23:01:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,688 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69584e6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:01:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:01:13,693 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:01:13,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:01:13,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:01:13,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dc6dd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:13,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:01:13,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:01:13,696 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:01:13,696 INFO [regionserver/b762025f20c5:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T23:01:13,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c02c89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:01:13,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:01:13,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:01:13,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:13,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:13,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:01:13,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:01:13,707 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:01:13,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 
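[editor's note] The records above trace the client-side connection bootstrap: ClusterIdFetcher asks the connection registry for the cluster id, stubs are created against the master endpoint, and the hbase:meta / hbase:acl region locations are resolved before any data RPC is issued. A minimal, illustrative sketch of the public API that drives this machinery (not the test's own code; only the hbase:acl table name appears in the log, everything else is assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table acl = connection.getTable(TableName.valueOf("hbase:acl"))) {
      // Region locations (such as the hbase:acl location fetched above) are resolved
      // lazily on first use and cached by the connection.
      System.out.println(acl.getName());
    }
  }
}
```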
2024-12-11T23:01:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:01:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:01:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T23:01:13,709 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:01:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
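[editor's note] At this point the master has written the table ACL into the snapshot description, read the acl entry for testtb-testExportWithChecksum, and decided "No existing snapshot, attempting snapshot...". The next records register SnapshotProcedure pid=221. A minimal sketch of the client call that triggers this flow; the snapshot and table names are taken from the log, the surrounding code is illustrative and not the test's exact helper:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes
      // (the periodic "Checking to see if procedure is done pid=221" records below
      // are this call polling for completion).
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```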
2024-12-11T23:01:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T23:01:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-11T23:01:13,713 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:01:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T23:01:13,714 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:01:13,719 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:01:13,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742331_1507 (size=156) 2024-12-11T23:01:13,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742331_1507 (size=156) 2024-12-11T23:01:13,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742331_1507 (size=156) 2024-12-11T23:01:13,732 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:01:13,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc}] 2024-12-11T23:01:13,733 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:13,733 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:13,742 INFO [regionserver/b762025f20c5:0.Chore.1 {}] 
regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T23:01:13,757 INFO [regionserver/b762025f20c5:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T23:01:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T23:01:13,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-11T23:01:13,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-11T23:01:13,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:01:13,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:13,885 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing b8cf674e6e4898a7bd768ade4bf1a10a 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T23:01:13,886 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 18f6027bec6e6407f29328b479aa7efc 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T23:01:13,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a is 71, key is 009a11f34d285c91071ee816fdc75fc4/cf:q/1733958073644/Put/seqid=0 2024-12-11T23:01:13,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc is 71, key is 1257b75652980d8666b71d1a0f0d44e7/cf:q/1733958073645/Put/seqid=0 2024-12-11T23:01:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742332_1508 (size=8031) 2024-12-11T23:01:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742332_1508 (size=8031) 2024-12-11T23:01:13,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
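[editor's note] The flush paths above go through mobdir/.tmp and are later renamed into mobdir/data, because the table's column family is MOB-enabled (the region-open record further down prints IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'). A hedged sketch of how such a family is declared; the table and family names come from the log, the rest is illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  static void createMobTable(Admin admin) throws java.io.IOException {
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // cell values above the threshold are written as MOB files under mobdir/
            .setMobThreshold(0L)   // threshold 0 means effectively every value is treated as a MOB
            .setMaxVersions(1)
            .build())
        .build());
  }
}
```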
2024-12-11T23:01:13,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742333_1509 (size=5241) 2024-12-11T23:01:13,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742332_1508 (size=8031) 2024-12-11T23:01:13,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:13,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/.tmp/cf/3f3126521b374e368bb31b0f28005320, store: [table=testtb-testExportWithChecksum family=cf region=18f6027bec6e6407f29328b479aa7efc] 2024-12-11T23:01:13,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/.tmp/cf/3f3126521b374e368bb31b0f28005320 is 206, key is 1912d19dea5226e4ec17d9b0c04b3d777/cf:q/1733958073645/Put/seqid=0 2024-12-11T23:01:13,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742333_1509 (size=5241) 2024-12-11T23:01:13,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742333_1509 (size=5241) 2024-12-11T23:01:13,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:14,003 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:14,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/.tmp/cf/56c3e252abdf47f4ad85b8f6b0063295, store: [table=testtb-testExportWithChecksum family=cf region=b8cf674e6e4898a7bd768ade4bf1a10a] 2024-12-11T23:01:14,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/.tmp/cf/56c3e252abdf47f4ad85b8f6b0063295 is 206, key is 015292bfce4fa2187786333b4a747c3fe/cf:q/1733958073644/Put/seqid=0 2024-12-11T23:01:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T23:01:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742334_1510 (size=14449) 2024-12-11T23:01:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742334_1510 (size=14449) 2024-12-11T23:01:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742334_1510 (size=14449) 2024-12-11T23:01:14,028 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/.tmp/cf/3f3126521b374e368bb31b0f28005320 2024-12-11T23:01:14,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/.tmp/cf/3f3126521b374e368bb31b0f28005320 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 2024-12-11T23:01:14,046 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320, entries=45, sequenceid=6, filesize=14.1 K 2024-12-11T23:01:14,047 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 18f6027bec6e6407f29328b479aa7efc in 162ms, sequenceid=6, compaction requested=false 2024-12-11T23:01:14,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 18f6027bec6e6407f29328b479aa7efc: 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. for snaptb0-testExportWithChecksum completed. 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320] hfiles 2024-12-11T23:01:14,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T23:01:14,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742335_1511 (size=6308) 2024-12-11T23:01:14,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742335_1511 (size=6308) 2024-12-11T23:01:14,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742335_1511 (size=6308) 2024-12-11T23:01:14,069 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/.tmp/cf/56c3e252abdf47f4ad85b8f6b0063295 2024-12-11T23:01:14,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/.tmp/cf/56c3e252abdf47f4ad85b8f6b0063295 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 2024-12-11T23:01:14,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295, entries=5, sequenceid=6, filesize=6.2 K 2024-12-11T23:01:14,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for b8cf674e6e4898a7bd768ade4bf1a10a in 198ms, sequenceid=6, compaction requested=false 2024-12-11T23:01:14,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for b8cf674e6e4898a7bd768ade4bf1a10a: 2024-12-11T23:01:14,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. for snaptb0-testExportWithChecksum completed. 2024-12-11T23:01:14,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T23:01:14,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:01:14,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295] hfiles 2024-12-11T23:01:14,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T23:01:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742336_1512 (size=107) 2024-12-11T23:01:14,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742336_1512 (size=107) 2024-12-11T23:01:14,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742336_1512 (size=107) 2024-12-11T23:01:14,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 
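[editor's note] Both SnapshotRegionCallables have now flushed their regions and added the resulting hfiles as references in the snapshot manifest. A small, illustrative client-side check that the snapshot eventually appears; only the snapshot name is taken from the log, the helper itself is assumed:

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotCheckSketch {
  // Returns true once the named snapshot is visible to the master,
  // e.g. snapshotExists(admin, "snaptb0-testExportWithChecksum").
  static boolean snapshotExists(Admin admin, String name) throws java.io.IOException {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
  }
}
```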
2024-12-11T23:01:14,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-11T23:01:14,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-11T23:01:14,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:14,100 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:14,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 18f6027bec6e6407f29328b479aa7efc in 369 msec 2024-12-11T23:01:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742337_1513 (size=107) 2024-12-11T23:01:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742337_1513 (size=107) 2024-12-11T23:01:14,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:14,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-11T23:01:14,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742337_1513 (size=107) 2024-12-11T23:01:14,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-11T23:01:14,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:14,115 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:14,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-11T23:01:14,121 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:01:14,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a in 385 msec 2024-12-11T23:01:14,121 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ 
ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:01:14,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T23:01:14,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:01:14,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:14,129 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a] hfiles 2024-12-11T23:01:14,129 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:01:14,129 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:14,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742338_1514 (size=291) 2024-12-11T23:01:14,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742338_1514 (size=291) 2024-12-11T23:01:14,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742338_1514 (size=291) 2024-12-11T23:01:14,153 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:01:14,153 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-11T23:01:14,154 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T23:01:14,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742339_1515 
(size=951) 2024-12-11T23:01:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742339_1515 (size=951) 2024-12-11T23:01:14,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742339_1515 (size=951) 2024-12-11T23:01:14,316 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:01:14,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T23:01:14,361 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:01:14,362 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T23:01:14,364 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:01:14,364 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-11T23:01:14,366 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 655 msec 2024-12-11T23:01:14,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T23:01:14,845 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T23:01:14,846 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846 2024-12-11T23:01:14,846 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846, 
rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:14,884 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:14,884 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@758cec5e, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T23:01:14,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:01:14,891 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T23:01:14,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:14,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:14,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:15,170 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T23:01:15,170 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T23:01:15,173 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 189b562ad19e67a2b978aec9d57dad01 changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:01:15,173 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f2c62d44aedcf51b4978173adc0395ae changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:01:15,173 DEBUG [master/b762025f20c5:0.Chore.1 {}] 
balancer.RegionHDFSBlockLocationFinder(159): Locality for region b8cf674e6e4898a7bd768ade4bf1a10a changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:01:15,173 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 18f6027bec6e6407f29328b479aa7efc changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:01:15,174 DEBUG [master/b762025f20c5:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-11T23:01:15,174 INFO [master/b762025f20c5:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-11T23:01:15,174 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 3 regions 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:01:15,175 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:01:15,176 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:01:15,176 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:01:15,176 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:01:15,176 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-11T23:01:15,178 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 2024-12-11T23:01:15,179 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2515037645791281, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8131716222850254, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8518719381398412, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-11T23:01:15,396 INFO [master/b762025f20c5:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. 
Computation took 219 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2515037645791281 to a new imbalance of 0.01563737144776112. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8131716222850254, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8518719381398412, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-11T23:01:15,401 INFO [master/b762025f20c5:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-12-11T23:01:15,401 INFO [master/b762025f20c5:0.Chore.1 {}] master.HMaster(2172): balance hri=b8cf674e6e4898a7bd768ade4bf1a10a, source=b762025f20c5,37563,1733957770731, destination=b762025f20c5,33867,1733957770938 2024-12-11T23:01:15,402 DEBUG [master/b762025f20c5:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE 2024-12-11T23:01:15,403 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE 2024-12-11T23:01:15,404 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:01:15,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE because future has completed 2024-12-11T23:01:15,412 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:01:15,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:01:15,565 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,566 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:01:15,566 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] 
regionserver.HRegion(1722): Closing b8cf674e6e4898a7bd768ade4bf1a10a, disabling compactions & flushes 2024-12-11T23:01:15,566 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:15,566 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:15,566 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. after waiting 0 ms 2024-12-11T23:01:15,566 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:15,575 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:01:15,576 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:01:15,576 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
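[editor's note] The REOPEN/MOVE close above executes the plan the StochasticLoadBalancer computed a few records earlier (initial imbalance 0.2515 down to 0.0156, cost-function multipliers 500 / 35 / 25 / 15 / 7 / 5, balance interval 300000 ms). Those multipliers and the balancer period are ordinary site configuration. A hedged sketch of overriding them in code, assuming the standard stochastic-balancer keys (verify the exact key names against your HBase version); equivalent entries could live in hbase-site.xml:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.balancer.period", 300000L);                          // matches the 300000 ms balance interval logged above
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 500f); // RegionCountSkewCostFunction multiplier
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7f);          // MoveCostFunction multiplier
    return conf;
  }
}
```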
2024-12-11T23:01:15,576 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for b8cf674e6e4898a7bd768ade4bf1a10a: Waiting for close lock at 1733958075566Running coprocessor pre-close hooks at 1733958075566Disabling compacts and flushes for region at 1733958075566Disabling writes for close at 1733958075566Writing region close event to WAL at 1733958075567 (+1 ms)Running coprocessor post-close hooks at 1733958075576 (+9 ms)Closed at 1733958075576 2024-12-11T23:01:15,577 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding b8cf674e6e4898a7bd768ade4bf1a10a move to b762025f20c5,33867,1733957770938 record at close sequenceid=6 2024-12-11T23:01:15,578 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,579 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=CLOSED 2024-12-11T23:01:15,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:01:15,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-11T23:01:15,583 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,37563,1733957770731 in 169 msec 2024-12-11T23:01:15,583 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE; state=CLOSED, location=b762025f20c5,33867,1733957770938; forceNewPlan=false, retain=false 2024-12-11T23:01:15,734 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
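[editor's note] Interleaved with this region move, the Time-limited test thread is assembling the ExportSnapshot job: it has verified the source snapshot, copied the manifest to the local target under .hbase-snapshot/.tmp, and is resolving dependency jars via TableMapReduceUtil. A hedged sketch of an equivalent standalone invocation; only the snapshot name comes from the log, the target path and mapper count are placeholders. The shell form would be roughly: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export -mappers 1

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the ExportSnapshot tool programmatically; it copies the snapshot manifest
    // and referenced hfiles from the source root to the -copy-to filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",   // placeholder; typically an HDFS URI
        "-mappers", "1"
    });
    System.exit(rc);
  }
}
```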
2024-12-11T23:01:15,734 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=OPENING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:01:15,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE because future has completed 2024-12-11T23:01:15,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:01:15,745 INFO [regionserver/b762025f20c5:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 39978 ms 2024-12-11T23:01:15,890 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:15,890 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => b8cf674e6e4898a7bd768ade4bf1a10a, NAME => 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:01:15,890 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. service=AccessControlService 2024-12-11T23:01:15,890 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:01:15,890 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,890 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:01:15,891 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,891 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,892 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,892 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b8cf674e6e4898a7bd768ade4bf1a10a columnFamilyName cf 2024-12-11T23:01:15,893 DEBUG [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:01:15,897 DEBUG [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 2024-12-11T23:01:15,897 INFO [StoreOpener-b8cf674e6e4898a7bd768ade4bf1a10a-1 {}] regionserver.HStore(327): Store=b8cf674e6e4898a7bd768ade4bf1a10a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:01:15,898 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,899 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,899 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,900 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,900 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,901 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,902 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1114): Opened b8cf674e6e4898a7bd768ade4bf1a10a; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65503620, jitterRate=-0.023919999599456787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:01:15,902 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:01:15,902 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for b8cf674e6e4898a7bd768ade4bf1a10a: Running coprocessor pre-open hook at 1733958075891Writing region info on filesystem at 1733958075891Initializing all the Stores at 1733958075891Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958075891Cleaning up temporary data from old regions at 1733958075900 (+9 ms)Running coprocessor post-open hooks at 1733958075902 (+2 ms)Region opened successfully at 1733958075902 2024-12-11T23:01:15,902 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a., pid=226, masterSystemTime=1733958075887 2024-12-11T23:01:15,904 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:01:15,904 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
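The DEBUG lines that follow ("mapreduce.TableMapReduceUtil(972): For class X, using jar Y") show the jar for each class required by the ExportSnapshot MapReduce job being located so it can be shipped with the job. A minimal sketch of how this dependency-jar shipping is typically triggered is below; it is illustrative only, and the job name is an assumption, not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Job name is illustrative; ExportSnapshot builds its own job internally.
        Job job = Job.getInstance(conf, "snapshot-export-sketch");
        // Locates the jar containing each required class and attaches it to the
        // job's classpath; resolving each class to a jar is what typically emits
        // "For class X, using jar Y" DEBUG lines like the ones in this log.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }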
2024-12-11T23:01:15,904 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=OPEN, openSeqNum=10, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:01:15,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:01:15,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-11T23:01:15,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938 in 170 msec 2024-12-11T23:01:15,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, REOPEN/MOVE in 507 msec 2024-12-11T23:01:16,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-701716502358299140.jar 2024-12-11T23:01:16,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,004 DEBUG [master/b762025f20c5:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-11T23:01:16,004 DEBUG [master/b762025f20c5:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b8cf674e6e4898a7bd768ade4bf1a10a changed from -1.0 to 0.0, refreshing cache 2024-12-11T23:01:16,011 DEBUG [master/b762025f20c5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T23:01:16,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-8515644534183991827.jar 2024-12-11T23:01:16,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,060 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:01:16,061 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:01:16,062 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0008_000001 (auth:SIMPLE) from 127.0.0.1:55666 2024-12-11T23:01:16,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T23:01:16,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:01:16,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:01:16,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:01:16,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:01:16,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:01:16,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:01:16,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:01:16,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:01:16,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:16,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:16,070 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0008/container_1733957780200_0008_01_000001/launch_container.sh] 2024-12-11T23:01:16,070 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0008/container_1733957780200_0008_01_000001/container_tokens] 2024-12-11T23:01:16,070 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0008/container_1733957780200_0008_01_000001/sysfs] 2024-12-11T23:01:16,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742340_1516 (size=131440) 2024-12-11T23:01:16,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742340_1516 (size=131440) 2024-12-11T23:01:16,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742340_1516 (size=131440) 2024-12-11T23:01:16,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742341_1517 (size=4188619) 2024-12-11T23:01:16,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742341_1517 (size=4188619) 2024-12-11T23:01:16,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742341_1517 (size=4188619) 2024-12-11T23:01:16,732 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:01:17,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742342_1518 (size=1323991) 2024-12-11T23:01:17,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742342_1518 (size=1323991) 2024-12-11T23:01:17,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742342_1518 (size=1323991) 2024-12-11T23:01:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742343_1519 (size=903541) 2024-12-11T23:01:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742343_1519 (size=903541) 2024-12-11T23:01:17,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742343_1519 (size=903541) 2024-12-11T23:01:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41607 is added to blk_1073742344_1520 (size=8360360) 2024-12-11T23:01:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742344_1520 (size=8360360) 2024-12-11T23:01:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742344_1520 (size=8360360) 2024-12-11T23:01:17,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742345_1521 (size=1877034) 2024-12-11T23:01:17,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742345_1521 (size=1877034) 2024-12-11T23:01:17,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742345_1521 (size=1877034) 2024-12-11T23:01:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742346_1522 (size=77835) 2024-12-11T23:01:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742346_1522 (size=77835) 2024-12-11T23:01:17,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742346_1522 (size=77835) 2024-12-11T23:01:17,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742347_1523 (size=30949) 2024-12-11T23:01:17,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742347_1523 (size=30949) 2024-12-11T23:01:17,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742347_1523 (size=30949) 2024-12-11T23:01:17,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742348_1524 (size=1597213) 2024-12-11T23:01:17,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742348_1524 (size=1597213) 2024-12-11T23:01:17,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742348_1524 (size=1597213) 2024-12-11T23:01:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742349_1525 (size=4695811) 2024-12-11T23:01:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742349_1525 (size=4695811) 2024-12-11T23:01:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742349_1525 (size=4695811) 2024-12-11T23:01:17,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742350_1526 (size=232957) 2024-12-11T23:01:17,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742350_1526 (size=232957) 2024-12-11T23:01:17,421 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742350_1526 (size=232957) 2024-12-11T23:01:17,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742351_1527 (size=127628) 2024-12-11T23:01:17,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742351_1527 (size=127628) 2024-12-11T23:01:17,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742351_1527 (size=127628) 2024-12-11T23:01:17,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742352_1528 (size=20406) 2024-12-11T23:01:17,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742352_1528 (size=20406) 2024-12-11T23:01:17,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742352_1528 (size=20406) 2024-12-11T23:01:17,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742353_1529 (size=5175431) 2024-12-11T23:01:17,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742353_1529 (size=5175431) 2024-12-11T23:01:17,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742353_1529 (size=5175431) 2024-12-11T23:01:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742354_1530 (size=443172) 2024-12-11T23:01:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742354_1530 (size=443172) 2024-12-11T23:01:17,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742354_1530 (size=443172) 2024-12-11T23:01:17,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742355_1531 (size=217634) 2024-12-11T23:01:17,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742355_1531 (size=217634) 2024-12-11T23:01:17,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742355_1531 (size=217634) 2024-12-11T23:01:17,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742356_1532 (size=1832290) 2024-12-11T23:01:17,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742356_1532 (size=1832290) 2024-12-11T23:01:17,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742356_1532 (size=1832290) 2024-12-11T23:01:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742357_1533 (size=322274) 2024-12-11T23:01:17,505 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742357_1533 (size=322274) 2024-12-11T23:01:17,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742357_1533 (size=322274) 2024-12-11T23:01:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742358_1534 (size=503880) 2024-12-11T23:01:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742358_1534 (size=503880) 2024-12-11T23:01:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742358_1534 (size=503880) 2024-12-11T23:01:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742359_1535 (size=29229) 2024-12-11T23:01:17,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742359_1535 (size=29229) 2024-12-11T23:01:17,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742359_1535 (size=29229) 2024-12-11T23:01:17,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742360_1536 (size=24096) 2024-12-11T23:01:17,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742360_1536 (size=24096) 2024-12-11T23:01:17,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742360_1536 (size=24096) 2024-12-11T23:01:17,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742361_1537 (size=6426222) 2024-12-11T23:01:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742361_1537 (size=6426222) 2024-12-11T23:01:17,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742361_1537 (size=6426222) 2024-12-11T23:01:17,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742362_1538 (size=111872) 2024-12-11T23:01:17,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742362_1538 (size=111872) 2024-12-11T23:01:17,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742362_1538 (size=111872) 2024-12-11T23:01:17,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742363_1539 (size=45609) 2024-12-11T23:01:17,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742363_1539 (size=45609) 2024-12-11T23:01:17,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742363_1539 (size=45609) 2024-12-11T23:01:17,574 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742364_1540 (size=136454) 2024-12-11T23:01:17,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742364_1540 (size=136454) 2024-12-11T23:01:17,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742364_1540 (size=136454) 2024-12-11T23:01:17,575 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T23:01:17,577 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T23:01:17,579 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-11T23:01:17,579 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-11T23:01:17,579 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-11T23:01:17,579 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T23:01:17,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742365_1541 (size=1023) 2024-12-11T23:01:17,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742365_1541 (size=1023) 2024-12-11T23:01:17,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742365_1541 (size=1023) 2024-12-11T23:01:17,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742366_1542 (size=35) 2024-12-11T23:01:17,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742366_1542 (size=35) 2024-12-11T23:01:17,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742366_1542 (size=35) 2024-12-11T23:01:17,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742367_1543 (size=304038) 2024-12-11T23:01:17,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742367_1543 (size=304038) 2024-12-11T23:01:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742367_1543 (size=304038) 2024-12-11T23:01:17,649 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:01:17,650 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T23:01:17,776 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41598 2024-12-11T23:01:20,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-11T23:01:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T23:01:20,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-11T23:01:20,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T23:01:23,673 INFO [regionserver/b762025f20c5:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. because 83c7242f4212a8a5171da84f96da9bc6/l has an old edit so flush to free WALs after random delay 24610 ms 2024-12-11T23:01:25,399 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:60382 2024-12-11T23:01:25,623 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:01:25,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742368_1544 (size=349736) 2024-12-11T23:01:25,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742368_1544 (size=349736) 2024-12-11T23:01:25,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742368_1544 (size=349736) 2024-12-11T23:01:27,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:34648 2024-12-11T23:01:27,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:56866 2024-12-11T23:01:28,513 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:36418 2024-12-11T23:01:28,516 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:32768 2024-12-11T23:01:31,064 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T23:01:32,611 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 189b562ad19e67a2b978aec9d57dad01, had cached 0 bytes from a total of 14463 2024-12-11T23:01:32,611 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f2c62d44aedcf51b4978173adc0395ae, had cached 0 bytes from a total of 6088 2024-12-11T23:01:34,530 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000002/launch_container.sh] 2024-12-11T23:01:34,530 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000002/container_tokens] 2024-12-11T23:01:34,531 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:35,587 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:36424 2024-12-11T23:01:35,728 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000003/launch_container.sh] 2024-12-11T23:01:35,729 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000003/container_tokens] 2024-12-11T23:01:35,729 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:36,489 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000005/launch_container.sh] 2024-12-11T23:01:36,489 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000005/container_tokens] 2024-12-11T23:01:36,489 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000005/sysfs] 2024-12-11T23:01:36,490 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000004/launch_container.sh] 2024-12-11T23:01:36,490 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000004/container_tokens] 2024-12-11T23:01:36,491 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000004/sysfs] 
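Each "Checksum mismatch between hdfs://... and file:/..." failure above and below comes from ExportSnapshot's post-copy verification when the source (HDFS) and destination (local file:) filesystems compute checksums differently. The error text itself names the two remedies: file-level checksums via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. A minimal sketch of re-running the export with the first option follows; only the snapshot name is taken from this log, and the destination path is an assumed placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrcSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // COMPOSITE_CRC gives HDFS a block-layout-independent file checksum that
        // can be compared against a non-HDFS copy (see the error text above).
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum", // snapshot name from this log
            "-copy-to", "file:///tmp/local-export"         // destination is illustrative
            // "-no-checksum-verify" would skip verification entirely (riskier).
        });
        System.exit(rc);
      }
    }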
2024-12-11T23:01:36,539 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:32782 2024-12-11T23:01:36,540 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:36426 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:37,544 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:36438 2024-12-11T23:01:38,686 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:01:39,478 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000011 while processing FINISH_CONTAINERS event 2024-12-11T23:01:40,475 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000012 while processing FINISH_CONTAINERS event 2024-12-11T23:01:40,824 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000007/launch_container.sh] 2024-12-11T23:01:40,824 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000007/container_tokens] 2024-12-11T23:01:40,825 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:41,632 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000009/launch_container.sh] 2024-12-11T23:01:41,632 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000009/container_tokens] 2024-12-11T23:01:41,632 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000009/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:42,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:56234 2024-12-11T23:01:42,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:42492 2024-12-11T23:01:42,691 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000010/launch_container.sh] 2024-12-11T23:01:42,692 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000010/container_tokens] 2024-12-11T23:01:42,692 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:43,566 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:56240 2024-12-11T23:01:43,576 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:42500 2024-12-11T23:01:44,678 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000017 while processing FINISH_CONTAINERS event 2024-12-11T23:01:45,679 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000018 while processing FINISH_CONTAINERS event 2024-12-11T23:01:46,719 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000008/launch_container.sh] 2024-12-11T23:01:46,719 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000008/container_tokens] 2024-12-11T23:01:46,719 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000008/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:48,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 83c7242f4212a8a5171da84f96da9bc6 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-11T23:01:48,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/ed5652a65b5e442ba7d201d53c15e662 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733958045174/DeleteFamily/seqid=0 2024-12-11T23:01:48,827 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:40962 2024-12-11T23:01:48,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742370_1546 (size=5791) 2024-12-11T23:01:48,832 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41590 2024-12-11T23:01:48,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742370_1546 (size=5791) 2024-12-11T23:01:48,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/ed5652a65b5e442ba7d201d53c15e662 2024-12-11T23:01:48,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742370_1546 (size=5791) 2024-12-11T23:01:48,869 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed5652a65b5e442ba7d201d53c15e662 2024-12-11T23:01:48,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/ed5652a65b5e442ba7d201d53c15e662 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/l/ed5652a65b5e442ba7d201d53c15e662 2024-12-11T23:01:48,877 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed5652a65b5e442ba7d201d53c15e662 2024-12-11T23:01:48,877 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/l/ed5652a65b5e442ba7d201d53c15e662, entries=13, sequenceid=28, filesize=5.7 K 2024-12-11T23:01:48,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 83c7242f4212a8a5171da84f96da9bc6 in 596ms, sequenceid=28, compaction requested=false 2024-12-11T23:01:48,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 83c7242f4212a8a5171da84f96da9bc6: 2024-12-11T23:01:49,480 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000016/launch_container.sh] 2024-12-11T23:01:49,480 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000016/container_tokens] 2024-12-11T23:01:49,480 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000016/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/local-export-1733958074846/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T23:01:50,835 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41596 2024-12-11T23:01:51,839 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41600 2024-12-11T23:01:52,497 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000014/launch_container.sh] 2024-12-11T23:01:52,497 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000014/container_tokens] 2024-12-11T23:01:52,497 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000014/sysfs] 2024-12-11T23:01:52,516 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000013/launch_container.sh] 2024-12-11T23:01:52,517 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000013/container_tokens] 2024-12-11T23:01:52,517 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000013/sysfs] 2024-12-11T23:01:53,417 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000023 while processing FINISH_CONTAINERS event 2024-12-11T23:01:53,823 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:40970 2024-12-11T23:01:53,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41608 2024-12-11T23:01:53,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41602 2024-12-11T23:01:53,925 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0009_01_000022 is : 143 2024-12-11T23:01:53,932 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0009_01_000024 while processing FINISH_CONTAINERS event 2024-12-11T23:01:53,953 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0009_01_000019 is : 143 2024-12-11T23:01:53,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000022/launch_container.sh] 2024-12-11T23:01:53,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000022/container_tokens] 2024-12-11T23:01:53,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000022/sysfs] 2024-12-11T23:01:53,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742369_1545 (size=48777) 2024-12-11T23:01:53,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742369_1545 (size=48777) 2024-12-11T23:01:53,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742369_1545 (size=48777) 2024-12-11T23:01:53,990 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000019/launch_container.sh] 2024-12-11T23:01:53,990 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000019/container_tokens] 2024-12-11T23:01:53,990 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000019/sysfs] 2024-12-11T23:01:54,003 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0009_01_000021 is : 143 2024-12-11T23:01:54,018 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000021/launch_container.sh] 2024-12-11T23:01:54,018 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000021/container_tokens] 2024-12-11T23:01:54,018 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000021/sysfs] 2024-12-11T23:01:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742371_1547 (size=460) 2024-12-11T23:01:54,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742371_1547 (size=460) 2024-12-11T23:01:54,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742371_1547 (size=460) 2024-12-11T23:01:54,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742372_1548 (size=48777) 2024-12-11T23:01:54,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742372_1548 (size=48777) 2024-12-11T23:01:54,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41607 is added to blk_1073742372_1548 (size=48777) 2024-12-11T23:01:54,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742373_1549 (size=349736) 2024-12-11T23:01:54,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742373_1549 (size=349736) 2024-12-11T23:01:54,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742373_1549 (size=349736) 2024-12-11T23:01:54,186 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:41620 2024-12-11T23:01:55,062 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000015/launch_container.sh] 2024-12-11T23:01:55,062 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000015/container_tokens] 2024-12-11T23:01:55,062 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_3/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000015/sysfs] 2024-12-11T23:01:55,723 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
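The repeated "Checksum mismatch" failures above come from exporting a snapshot from an hdfs:// source to a local file: destination: the two filesystems expose different checksum algorithms, so the per-file verification in ExportSnapshot cannot match them. The error text itself names the two escape hatches: file-level checksums via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. A minimal sketch of re-running the export with the first option, using the ToolRunner entry point visible in the stack traces (the class name and the local target path are illustrative, not from this run; the snapshot name is the one used by this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Same effect as passing -Ddfs.checksum.combine.mode=COMPOSITE_CRC on the
        // command line: ask HDFS for a block-size-independent, file-level CRC so it
        // stays comparable across filesystem types.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",  // snapshot name from this log
            "-copy-to", "file:///tmp/local-export"          // illustrative local destination
            // Alternative named in the error message (masks corruption in transit):
            // "-no-checksum-verify"
        });
        System.exit(rc);
      }
    }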
2024-12-11T23:01:55,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=78.71 KB heapSize=124.51 KB 2024-12-11T23:01:55,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/e9846cd4bd8b418f820b783a8c1ac316 is 181, key is testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc./info:regioninfo/1733958072197/Put/seqid=0 2024-12-11T23:01:55,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742374_1550 (size=18209) 2024-12-11T23:01:55,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742374_1550 (size=18209) 2024-12-11T23:01:55,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.54 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/e9846cd4bd8b418f820b783a8c1ac316 2024-12-11T23:01:55,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742374_1550 (size=18209) 2024-12-11T23:01:55,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/d90a3e6bec534ce88b3dbe8f9f11a294 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc./ns:/1733958045199/DeleteFamily/seqid=0 2024-12-11T23:01:55,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742375_1551 (size=7924) 2024-12-11T23:01:55,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742375_1551 (size=7924) 2024-12-11T23:01:55,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742375_1551 (size=7924) 2024-12-11T23:01:55,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/d90a3e6bec534ce88b3dbe8f9f11a294 2024-12-11T23:01:55,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/4a75190603e44f01a11f71e16aaeb22a is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc./rep_barrier:/1733958045199/DeleteFamily/seqid=0 2024-12-11T23:01:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742376_1552 (size=8195) 2024-12-11T23:01:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742376_1552 (size=8195) 2024-12-11T23:01:55,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39843 is added to blk_1073742376_1552 (size=8195) 2024-12-11T23:01:55,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/4a75190603e44f01a11f71e16aaeb22a 2024-12-11T23:01:55,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/c928e2afc6e94d45b0feb470593ab956 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733958027543.f7e2555a0bb1b75579ee19dc57dddecc./table:/1733958045199/DeleteFamily/seqid=0 2024-12-11T23:01:55,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742377_1553 (size=9051) 2024-12-11T23:01:55,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742377_1553 (size=9051) 2024-12-11T23:01:55,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742377_1553 (size=9051) 2024-12-11T23:01:55,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/c928e2afc6e94d45b0feb470593ab956 2024-12-11T23:01:55,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/e9846cd4bd8b418f820b783a8c1ac316 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/info/e9846cd4bd8b418f820b783a8c1ac316 2024-12-11T23:01:55,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/info/e9846cd4bd8b418f820b783a8c1ac316, entries=101, sequenceid=215, filesize=17.8 K 2024-12-11T23:01:55,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/d90a3e6bec534ce88b3dbe8f9f11a294 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/ns/d90a3e6bec534ce88b3dbe8f9f11a294 2024-12-11T23:01:55,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/ns/d90a3e6bec534ce88b3dbe8f9f11a294, entries=24, sequenceid=215, filesize=7.7 K 2024-12-11T23:01:55,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/4a75190603e44f01a11f71e16aaeb22a as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/rep_barrier/4a75190603e44f01a11f71e16aaeb22a 2024-12-11T23:01:55,865 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/rep_barrier/4a75190603e44f01a11f71e16aaeb22a, entries=22, sequenceid=215, filesize=8.0 K 2024-12-11T23:01:55,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/c928e2afc6e94d45b0feb470593ab956 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/table/c928e2afc6e94d45b0feb470593ab956 2024-12-11T23:01:55,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/table/c928e2afc6e94d45b0feb470593ab956, entries=39, sequenceid=215, filesize=8.8 K 2024-12-11T23:01:55,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~78.71 KB/80594, heapSize ~124.45 KB/127432, currentSize=0 B/0 for 1588230740 in 149ms, sequenceid=215, compaction requested=false 2024-12-11T23:01:55,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-11T23:01:56,029 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733957780200_0009_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
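The ExportSnapshotException above is the aggregate result of those mapper failures: runCopyJob sees the MapReduce job fail (failedMaps:1) because every copy attempt fails verification, and the test then retries the export against an HDFS destination, where both sides use the same checksum algorithm. A simplified sketch of why the hdfs:// to file: comparison cannot succeed, using the Hadoop FileSystem checksum API (this is an assumed illustration, not the actual ExportSnapshot.verifyCopyResult code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ChecksumCompare {
      // True only when both filesystems produce a checksum and the checksums match.
      public static boolean sameChecksum(Configuration conf, Path src, Path dst) throws Exception {
        FileSystem srcFs = src.getFileSystem(conf);
        FileSystem dstFs = dst.getFileSystem(conf);
        FileChecksum in = srcFs.getFileChecksum(src);
        FileChecksum out = dstFs.getFileChecksum(dst);
        if (in == null || out == null) {
          // One side may not expose a checksum at all, so there is nothing to compare.
          return false;
        }
        // FileChecksum.equals also compares the algorithm name, so checksums computed
        // with different algorithms (the "different types" case in the errors above)
        // never compare equal unless a common mode such as COMPOSITE_CRC is used.
        return in.equals(out);
      }
    }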
2024-12-11T23:01:56,030 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030 2024-12-11T23:01:56,030 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030, srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:56,064 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:01:56,064 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T23:01:56,067 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:01:56,073 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T23:01:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742379_1555 (size=951) 2024-12-11T23:01:56,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742378_1554 (size=156) 2024-12-11T23:01:56,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742379_1555 (size=951) 2024-12-11T23:01:56,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742378_1554 (size=156) 2024-12-11T23:01:56,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742378_1554 (size=156) 2024-12-11T23:01:56,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742379_1555 (size=951) 2024-12-11T23:01:56,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:56,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:56,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,094 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-5214330564871605955.jar 2024-12-11T23:01:57,095 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,095 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,165 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 18f6027bec6e6407f29328b479aa7efc, had cached 0 bytes from a total of 14449 2024-12-11T23:01:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-11037302581917545733.jar 2024-12-11T23:01:57,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:01:57,173 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:01:57,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:01:57,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T23:01:57,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:01:57,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:01:57,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:01:57,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:01:57,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:01:57,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:01:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:01:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:01:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:01:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:01:57,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742380_1556 (size=131440) 2024-12-11T23:01:57,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742380_1556 (size=131440) 2024-12-11T23:01:57,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742380_1556 (size=131440) 2024-12-11T23:01:57,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742381_1557 (size=4188619) 2024-12-11T23:01:57,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742381_1557 (size=4188619) 2024-12-11T23:01:57,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742381_1557 (size=4188619) 2024-12-11T23:01:57,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742382_1558 (size=1323991) 2024-12-11T23:01:57,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742382_1558 (size=1323991) 2024-12-11T23:01:57,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added 
to blk_1073742382_1558 (size=1323991) 2024-12-11T23:01:57,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742383_1559 (size=903541) 2024-12-11T23:01:57,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742383_1559 (size=903541) 2024-12-11T23:01:57,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742383_1559 (size=903541) 2024-12-11T23:01:57,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742384_1560 (size=8360360) 2024-12-11T23:01:57,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742384_1560 (size=8360360) 2024-12-11T23:01:57,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742384_1560 (size=8360360) 2024-12-11T23:01:57,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742385_1561 (size=443172) 2024-12-11T23:01:57,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742385_1561 (size=443172) 2024-12-11T23:01:57,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742385_1561 (size=443172) 2024-12-11T23:01:57,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742386_1562 (size=1877034) 2024-12-11T23:01:57,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742386_1562 (size=1877034) 2024-12-11T23:01:57,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742386_1562 (size=1877034) 2024-12-11T23:01:57,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742387_1563 (size=77835) 2024-12-11T23:01:57,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742387_1563 (size=77835) 2024-12-11T23:01:57,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742387_1563 (size=77835) 2024-12-11T23:01:57,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742388_1564 (size=30949) 2024-12-11T23:01:57,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742388_1564 (size=30949) 2024-12-11T23:01:57,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742388_1564 (size=30949) 2024-12-11T23:01:57,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742389_1565 (size=1597213) 2024-12-11T23:01:57,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 
is added to blk_1073742389_1565 (size=1597213) 2024-12-11T23:01:57,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742389_1565 (size=1597213) 2024-12-11T23:01:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742390_1566 (size=4695811) 2024-12-11T23:01:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742390_1566 (size=4695811) 2024-12-11T23:01:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742390_1566 (size=4695811) 2024-12-11T23:01:57,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742391_1567 (size=232957) 2024-12-11T23:01:57,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742391_1567 (size=232957) 2024-12-11T23:01:57,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742391_1567 (size=232957) 2024-12-11T23:01:58,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742392_1568 (size=127628) 2024-12-11T23:01:58,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742392_1568 (size=127628) 2024-12-11T23:01:58,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742392_1568 (size=127628) 2024-12-11T23:01:58,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742393_1569 (size=20406) 2024-12-11T23:01:58,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742393_1569 (size=20406) 2024-12-11T23:01:58,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742393_1569 (size=20406) 2024-12-11T23:01:58,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742394_1570 (size=5175431) 2024-12-11T23:01:58,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742394_1570 (size=5175431) 2024-12-11T23:01:58,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742394_1570 (size=5175431) 2024-12-11T23:01:58,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742395_1571 (size=217634) 2024-12-11T23:01:58,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742395_1571 (size=217634) 2024-12-11T23:01:58,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742395_1571 (size=217634) 2024-12-11T23:01:58,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36751 is added to blk_1073742396_1572 (size=1832290) 2024-12-11T23:01:58,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742396_1572 (size=1832290) 2024-12-11T23:01:58,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742396_1572 (size=1832290) 2024-12-11T23:01:58,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742397_1573 (size=322274) 2024-12-11T23:01:58,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742397_1573 (size=322274) 2024-12-11T23:01:58,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742397_1573 (size=322274) 2024-12-11T23:01:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742398_1574 (size=503880) 2024-12-11T23:01:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742398_1574 (size=503880) 2024-12-11T23:01:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742398_1574 (size=503880) 2024-12-11T23:01:58,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742399_1575 (size=29229) 2024-12-11T23:01:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742399_1575 (size=29229) 2024-12-11T23:01:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742399_1575 (size=29229) 2024-12-11T23:01:58,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742400_1576 (size=24096) 2024-12-11T23:01:58,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742400_1576 (size=24096) 2024-12-11T23:01:58,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742400_1576 (size=24096) 2024-12-11T23:01:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742401_1577 (size=6426222) 2024-12-11T23:01:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742401_1577 (size=6426222) 2024-12-11T23:01:58,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742401_1577 (size=6426222) 2024-12-11T23:01:58,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742402_1578 (size=111872) 2024-12-11T23:01:58,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742402_1578 (size=111872) 2024-12-11T23:01:58,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36751 is added to blk_1073742402_1578 (size=111872) 2024-12-11T23:01:58,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742403_1579 (size=45609) 2024-12-11T23:01:58,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742403_1579 (size=45609) 2024-12-11T23:01:58,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742403_1579 (size=45609) 2024-12-11T23:01:58,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742404_1580 (size=136454) 2024-12-11T23:01:58,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742404_1580 (size=136454) 2024-12-11T23:01:58,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742404_1580 (size=136454) 2024-12-11T23:01:58,546 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T23:01:58,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T23:01:58,550 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.1 K 2024-12-11T23:01:58,551 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-11T23:01:58,551 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-11T23:01:58,551 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T23:01:58,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742405_1581 (size=1023) 2024-12-11T23:01:58,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742405_1581 (size=1023) 2024-12-11T23:01:58,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742405_1581 (size=1023) 2024-12-11T23:01:58,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742406_1582 (size=35) 2024-12-11T23:01:58,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742406_1582 (size=35) 2024-12-11T23:01:58,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742406_1582 (size=35) 2024-12-11T23:01:58,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742407_1583 (size=303994) 2024-12-11T23:01:58,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742407_1583 (size=303994) 2024-12-11T23:01:58,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742407_1583 (size=303994) 2024-12-11T23:01:59,136 WARN [ContainersLauncher #3 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000020/launch_container.sh] 2024-12-11T23:01:59,136 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000020/container_tokens] 2024-12-11T23:01:59,136 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000020/sysfs] 2024-12-11T23:02:00,251 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:02:00,251 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:02:00,254 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0009_000001 (auth:SIMPLE) from 127.0.0.1:37470 2024-12-11T23:02:00,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000001/launch_container.sh] 2024-12-11T23:02:00,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000001/container_tokens] 2024-12-11T23:02:00,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0009/container_1733957780200_0009_01_000001/sysfs] 2024-12-11T23:02:00,891 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b8cf674e6e4898a7bd768ade4bf1a10a, had cached 0 bytes from a total of 6308 2024-12-11T23:02:01,036 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:44864 2024-12-11T23:02:05,392 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:33430 2024-12-11T23:02:05,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742408_1584 (size=349692) 2024-12-11T23:02:05,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742408_1584 (size=349692) 2024-12-11T23:02:05,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742408_1584 (size=349692) 2024-12-11T23:02:07,615 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:44872 2024-12-11T23:02:07,615 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:37472 2024-12-11T23:02:08,483 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:35892 2024-12-11T23:02:08,505 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:36532 2024-12-11T23:02:08,686 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:02:11,253 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T23:02:13,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742409_1585 (size=14449) 2024-12-11T23:02:13,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742409_1585 (size=14449) 2024-12-11T23:02:13,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742409_1585 (size=14449) 2024-12-11T23:02:14,777 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 43379 2024-12-11T23:02:14,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742411_1587 (size=8031) 2024-12-11T23:02:14,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742411_1587 (size=8031) 2024-12-11T23:02:14,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742411_1587 (size=8031) 2024-12-11T23:02:15,284 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000003/launch_container.sh] 2024-12-11T23:02:15,284 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000003/container_tokens] 2024-12-11T23:02:15,284 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000003/sysfs] 2024-12-11T23:02:15,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742412_1588 (size=6308) 2024-12-11T23:02:15,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742412_1588 (size=6308) 2024-12-11T23:02:15,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742412_1588 (size=6308) 2024-12-11T23:02:16,094 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83c7242f4212a8a5171da84f96da9bc6, had cached 0 bytes from a total of 5791 2024-12-11T23:02:16,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742413_1589 (size=5241) 2024-12-11T23:02:16,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742413_1589 (size=5241) 2024-12-11T23:02:16,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742413_1589 (size=5241) 2024-12-11T23:02:16,153 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000004/launch_container.sh] 2024-12-11T23:02:16,153 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000004/container_tokens] 2024-12-11T23:02:16,153 WARN [ContainersLauncher #7 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_0/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000004/sysfs] 2024-12-11T23:02:16,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742410_1586 (size=31733) 2024-12-11T23:02:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742410_1586 (size=31733) 2024-12-11T23:02:16,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742410_1586 (size=31733) 2024-12-11T23:02:16,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742414_1590 (size=463) 2024-12-11T23:02:16,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742414_1590 (size=463) 2024-12-11T23:02:16,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742414_1590 (size=463) 2024-12-11T23:02:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742415_1591 (size=31733) 2024-12-11T23:02:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742415_1591 (size=31733) 2024-12-11T23:02:16,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742415_1591 (size=31733) 2024-12-11T23:02:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742416_1592 (size=349692) 2024-12-11T23:02:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742416_1592 (size=349692) 2024-12-11T23:02:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742416_1592 (size=349692) 2024-12-11T23:02:16,260 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:36536 2024-12-11T23:02:16,266 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:35898 2024-12-11T23:02:16,271 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0010_01_000005 is : 143 2024-12-11T23:02:16,280 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000005/launch_container.sh] 2024-12-11T23:02:16,280 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000005/container_tokens] 2024-12-11T23:02:16,280 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000005/sysfs] 2024-12-11T23:02:17,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 189b562ad19e67a2b978aec9d57dad01, had cached 0 bytes from a total of 14463 2024-12-11T23:02:17,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f2c62d44aedcf51b4978173adc0395ae, had cached 0 bytes from a total of 6088 2024-12-11T23:02:17,815 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:02:17,816 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T23:02:17,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-11T23:02:17,823 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:02:17,824 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T23:02:17,824 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958116030/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T23:02:17,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-11T23:02:17,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T23:02:17,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958137832"}]},"ts":"1733958137832"} 2024-12-11T23:02:17,834 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-11T23:02:17,834 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-11T23:02:17,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-11T23:02:17,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, UNASSIGN}] 2024-12-11T23:02:17,837 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, UNASSIGN 2024-12-11T23:02:17,837 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, UNASSIGN 2024-12-11T23:02:17,837 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=18f6027bec6e6407f29328b479aa7efc, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:02:17,837 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=CLOSING, regionLocation=b762025f20c5,33867,1733957770938 2024-12-11T23:02:17,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, UNASSIGN because future has completed 2024-12-11T23:02:17,841 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:02:17,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938}] 2024-12-11T23:02:17,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, UNASSIGN because future has completed 2024-12-11T23:02:17,842 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:02:17,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:02:17,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T23:02:17,994 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(122): Close b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:02:17,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:02:17,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing b8cf674e6e4898a7bd768ade4bf1a10a, disabling compactions & flushes 2024-12-11T23:02:17,994 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:02:17,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 2024-12-11T23:02:17,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. after waiting 0 ms 2024-12-11T23:02:17,994 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
2024-12-11T23:02:17,996 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:02:17,996 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:02:17,996 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing 18f6027bec6e6407f29328b479aa7efc, disabling compactions & flushes 2024-12-11T23:02:17,996 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:02:17,996 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:02:17,996 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. after waiting 0 ms 2024-12-11T23:02:17,996 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:02:17,998 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-12-11T23:02:17,999 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:02:17,999 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a. 
2024-12-11T23:02:17,999 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for b8cf674e6e4898a7bd768ade4bf1a10a: Waiting for close lock at 1733958137994Running coprocessor pre-close hooks at 1733958137994Disabling compacts and flushes for region at 1733958137994Disabling writes for close at 1733958137994Writing region close event to WAL at 1733958137995 (+1 ms)Running coprocessor post-close hooks at 1733958137999 (+4 ms)Closed at 1733958137999 2024-12-11T23:02:17,999 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:02:18,000 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:02:18,000 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc. 2024-12-11T23:02:18,000 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for 18f6027bec6e6407f29328b479aa7efc: Waiting for close lock at 1733958137996Running coprocessor pre-close hooks at 1733958137996Disabling compacts and flushes for region at 1733958137996Disabling writes for close at 1733958137996Writing region close event to WAL at 1733958137997 (+1 ms)Running coprocessor post-close hooks at 1733958138000 (+3 ms)Closed at 1733958138000 2024-12-11T23:02:18,001 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:02:18,002 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=b8cf674e6e4898a7bd768ade4bf1a10a, regionState=CLOSED 2024-12-11T23:02:18,002 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed 18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:02:18,002 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=18f6027bec6e6407f29328b479aa7efc, regionState=CLOSED 2024-12-11T23:02:18,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938 because future has completed 2024-12-11T23:02:18,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:02:18,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=229 2024-12-11T23:02:18,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=230 
2024-12-11T23:02:18,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure 18f6027bec6e6407f29328b479aa7efc, server=b762025f20c5,41635,1733957771023 in 163 msec 2024-12-11T23:02:18,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure b8cf674e6e4898a7bd768ade4bf1a10a, server=b762025f20c5,33867,1733957770938 in 164 msec 2024-12-11T23:02:18,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b8cf674e6e4898a7bd768ade4bf1a10a, UNASSIGN in 170 msec 2024-12-11T23:02:18,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=230, resume processing ppid=228 2024-12-11T23:02:18,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=18f6027bec6e6407f29328b479aa7efc, UNASSIGN in 170 msec 2024-12-11T23:02:18,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-11T23:02:18,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 173 msec 2024-12-11T23:02:18,010 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958138010"}]},"ts":"1733958138010"} 2024-12-11T23:02:18,012 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-11T23:02:18,012 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-11T23:02:18,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 182 msec 2024-12-11T23:02:18,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T23:02:18,145 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T23:02:18,145 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-11T23:02:18,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,147 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-11T23:02:18,148 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-11T23:02:18,167 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:02:18,168 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:02:18,173 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits] 2024-12-11T23:02:18,175 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/recovered.edits] 2024-12-11T23:02:18,189 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/cf/56c3e252abdf47f4ad85b8f6b0063295 2024-12-11T23:02:18,191 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/cf/3f3126521b374e368bb31b0f28005320 2024-12-11T23:02:18,197 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits/12.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a/recovered.edits/12.seqid 2024-12-11T23:02:18,197 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc/recovered.edits/9.seqid 2024-12-11T23:02:18,197 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:02:18,197 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportWithChecksum/18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:02:18,197 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-11T23:02:18,198 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-11T23:02:18,199 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-11T23:02:18,228 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241211de286b03ebbd4ef9b8452d5de8ca329c_b8cf674e6e4898a7bd768ade4bf1a10a 2024-12-11T23:02:18,228 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241211a135156a89cc4ee2887fe95b36acaf67_18f6027bec6e6407f29328b479aa7efc 2024-12-11T23:02:18,229 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-11T23:02:18,231 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,242 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-11T23:02:18,244 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-11T23:02:18,246 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,246 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-11T23:02:18,247 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958138246"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:18,247 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958138246"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:18,250 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:02:18,250 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b8cf674e6e4898a7bd768ade4bf1a10a, NAME => 'testtb-testExportWithChecksum,,1733958071827.b8cf674e6e4898a7bd768ade4bf1a10a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 18f6027bec6e6407f29328b479aa7efc, NAME => 'testtb-testExportWithChecksum,1,1733958071827.18f6027bec6e6407f29328b479aa7efc.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:02:18,250 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-11T23:02:18,250 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958138250"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:18,253 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-11T23:02:18,255 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T23:02:18,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 110 msec 2024-12-11T23:02:18,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, 
quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T23:02:18,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T23:02:18,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T23:02:18,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-11T23:02:18,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,294 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-11T23:02:18,294 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T23:02:18,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T23:02:18,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-11T23:02:18,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T23:02:18,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-11T23:02:18,331 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=817 (was 811) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_728995594_1 at /127.0.0.1:40388 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_728995594_1 at /127.0.0.1:42216 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1824557564_22 at /127.0.0.1:40400 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:34946 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 147472) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:37141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-9111 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:45212 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:34944 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=797 (was 797), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=759 (was 618) - SystemLoadAverage LEAK? -, ProcessCount=28 (was 18) - ProcessCount LEAK? 
-, AvailableMemoryMB=6674 (was 8297) 2024-12-11T23:02:18,332 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-11T23:02:18,356 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=817, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=759, ProcessCount=28, AvailableMemoryMB=6672 2024-12-11T23:02:18,356 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-11T23:02:18,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T23:02:18,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:18,362 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T23:02:18,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-11T23:02:18,365 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T23:02:18,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-11T23:02:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742417_1593 (size=454) 2024-12-11T23:02:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742417_1593 (size=454) 2024-12-11T23:02:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742417_1593 (size=454) 2024-12-11T23:02:18,412 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 883cb9411812424601639d54a1542515, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:02:18,414 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0e8295e1189a83034c0eff7fe9c9061a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:02:18,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742419_1595 (size=79) 2024-12-11T23:02:18,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742419_1595 (size=79) 2024-12-11T23:02:18,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742419_1595 (size=79) 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 0e8295e1189a83034c0eff7fe9c9061a, disabling compactions & flushes 2024-12-11T23:02:18,431 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. after waiting 0 ms 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 
2024-12-11T23:02:18,431 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:18,431 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0e8295e1189a83034c0eff7fe9c9061a: Waiting for close lock at 1733958138431Disabling compacts and flushes for region at 1733958138431Disabling writes for close at 1733958138431Writing region close event to WAL at 1733958138431Closed at 1733958138431 2024-12-11T23:02:18,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742418_1594 (size=79) 2024-12-11T23:02:18,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742418_1594 (size=79) 2024-12-11T23:02:18,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742418_1594 (size=79) 2024-12-11T23:02:18,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:02:18,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 883cb9411812424601639d54a1542515, disabling compactions & flushes 2024-12-11T23:02:18,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. after waiting 0 ms 2024-12-11T23:02:18,441 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,442 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
2024-12-11T23:02:18,442 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 883cb9411812424601639d54a1542515: Waiting for close lock at 1733958138441Disabling compacts and flushes for region at 1733958138441Disabling writes for close at 1733958138441Writing region close event to WAL at 1733958138441Closed at 1733958138441 2024-12-11T23:02:18,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T23:02:18,444 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733958138443"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958138443"}]},"ts":"1733958138443"} 2024-12-11T23:02:18,444 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733958138443"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733958138443"}]},"ts":"1733958138443"} 2024-12-11T23:02:18,447 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T23:02:18,450 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T23:02:18,451 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958138450"}]},"ts":"1733958138450"} 2024-12-11T23:02:18,453 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-11T23:02:18,453 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T23:02:18,455 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T23:02:18,455 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T23:02:18,455 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T23:02:18,455 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T23:02:18,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, 
ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, ASSIGN}] 2024-12-11T23:02:18,457 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, ASSIGN 2024-12-11T23:02:18,457 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, ASSIGN 2024-12-11T23:02:18,458 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, ASSIGN; state=OFFLINE, location=b762025f20c5,41635,1733957771023; forceNewPlan=false, retain=false 2024-12-11T23:02:18,458 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, ASSIGN; state=OFFLINE, location=b762025f20c5,37563,1733957770731; forceNewPlan=false, retain=false 2024-12-11T23:02:18,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-11T23:02:18,609 INFO [b762025f20c5:42043 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T23:02:18,609 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=0e8295e1189a83034c0eff7fe9c9061a, regionState=OPENING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:02:18,610 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=883cb9411812424601639d54a1542515, regionState=OPENING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:02:18,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, ASSIGN because future has completed 2024-12-11T23:02:18,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:02:18,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, ASSIGN because future has completed 2024-12-11T23:02:18,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:02:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-11T23:02:18,778 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:18,778 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e8295e1189a83034c0eff7fe9c9061a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T23:02:18,779 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. service=AccessControlService 2024-12-11T23:02:18,779 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:02:18,779 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,779 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:02:18,779 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,779 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,781 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,782 INFO [StoreOpener-0e8295e1189a83034c0eff7fe9c9061a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,782 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => 883cb9411812424601639d54a1542515, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T23:02:18,782 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. service=AccessControlService 2024-12-11T23:02:18,782 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T23:02:18,782 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,782 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T23:02:18,783 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,783 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,783 INFO [StoreOpener-0e8295e1189a83034c0eff7fe9c9061a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e8295e1189a83034c0eff7fe9c9061a columnFamilyName cf 2024-12-11T23:02:18,784 INFO [StoreOpener-883cb9411812424601639d54a1542515-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,785 INFO [StoreOpener-883cb9411812424601639d54a1542515-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 883cb9411812424601639d54a1542515 columnFamilyName cf 2024-12-11T23:02:18,786 DEBUG [StoreOpener-883cb9411812424601639d54a1542515-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:02:18,787 INFO [StoreOpener-883cb9411812424601639d54a1542515-1 {}] regionserver.HStore(327): Store=883cb9411812424601639d54a1542515/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
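[editor's note] The CompactionConfiguration line printed by each store opener above (minCompactSize 128 MB, 3..10 files per compaction, ratio 1.2, off-peak ratio 5.0) reflects the usual store-compaction settings. A hedged sketch of the configuration keys those values normally come from; the values mirror the log, and the key names are the commonly documented ones (verify against your HBase version before relying on them).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static Configuration defaultsSeenInLog() {
        Configuration conf = HBaseConfiguration.create();
        // Values below mirror the CompactionConfiguration line logged by the store opener.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        return conf;
      }
    }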
2024-12-11T23:02:18,787 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,788 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515 2024-12-11T23:02:18,788 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515 2024-12-11T23:02:18,789 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,789 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,790 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,793 DEBUG [StoreOpener-0e8295e1189a83034c0eff7fe9c9061a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:02:18,793 INFO [StoreOpener-0e8295e1189a83034c0eff7fe9c9061a-1 {}] regionserver.HStore(327): Store=0e8295e1189a83034c0eff7fe9c9061a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T23:02:18,794 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:02:18,794 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,794 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened 883cb9411812424601639d54a1542515; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74541309, jitterRate=0.11075206100940704}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:02:18,794 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 883cb9411812424601639d54a1542515 2024-12-11T23:02:18,795 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for 883cb9411812424601639d54a1542515: Running coprocessor pre-open hook at 1733958138783Writing region info on filesystem at 
1733958138783Initializing all the Stores at 1733958138784 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958138784Cleaning up temporary data from old regions at 1733958138789 (+5 ms)Running coprocessor post-open hooks at 1733958138794 (+5 ms)Region opened successfully at 1733958138795 (+1 ms) 2024-12-11T23:02:18,798 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,798 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515., pid=238, masterSystemTime=1733958138779 2024-12-11T23:02:18,798 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,798 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,798 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,800 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,800 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:18,800 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,800 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=883cb9411812424601639d54a1542515, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:02:18,801 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b762025f20c5,37563,1733957770731, table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
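[editor's note] The region-open journal above dumps the 'cf' family descriptor: MOB enabled with threshold 0, a single version, ROW bloom filter, 64 KB blocks, no compression. A hedged reconstruction of how such a table could be declared through the public builder API, with a pre-split at row '1' matching the two regions in this log; this is an illustration of the descriptor, not the test's actual table-creation code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void create(Admin admin) throws Exception {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)               // IS_MOB => 'true'
            .setMobThreshold(0L)               // MOB_THRESHOLD => '0'
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => 65536
            .setCompressionType(Compression.Algorithm.NONE)
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setColumnFamily(cf)
            .build();
        // Pre-split at '1' so the table starts with the two regions visible in this log.
        admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
      }
    }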
2024-12-11T23:02:18,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:02:18,803 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T23:02:18,803 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened 0e8295e1189a83034c0eff7fe9c9061a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68962261, jitterRate=0.027617767453193665}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T23:02:18,804 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:18,804 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for 0e8295e1189a83034c0eff7fe9c9061a: Running coprocessor pre-open hook at 1733958138779Writing region info on filesystem at 1733958138779Initializing all the Stores at 1733958138781 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733958138781Cleaning up temporary data from old regions at 1733958138798 (+17 ms)Running coprocessor post-open hooks at 1733958138804 (+6 ms)Region opened successfully at 1733958138804 2024-12-11T23:02:18,805 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a., pid=237, masterSystemTime=1733958138775 2024-12-11T23:02:18,806 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:18,806 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 
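[editor's note] Once both OpenRegionProcedures report back (and the meta rows flip to OPEN below), the client can see the two regions and the servers hosting them; the test's own check later logs "Found 2 regions for table". A minimal sketch of the same lookup through RegionLocator; names come from this log, the printout format is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegionLocations {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Expect two entries here, matching regions 883cb94... and 0e8295e... above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }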
2024-12-11T23:02:18,807 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=0e8295e1189a83034c0eff7fe9c9061a, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:02:18,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=235 2024-12-11T23:02:18,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731 in 180 msec 2024-12-11T23:02:18,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, ASSIGN in 352 msec 2024-12-11T23:02:18,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:02:18,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-12-11T23:02:18,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023 in 197 msec 2024-12-11T23:02:18,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=236, resume processing ppid=234 2024-12-11T23:02:18,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, ASSIGN in 356 msec 2024-12-11T23:02:18,814 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T23:02:18,814 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958138814"}]},"ts":"1733958138814"} 2024-12-11T23:02:18,816 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-11T23:02:18,816 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T23:02:18,817 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-11T23:02:18,820 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T23:02:18,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:18,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,843 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:18,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 484 msec 2024-12-11T23:02:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-11T23:02:18,995 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T23:02:18,996 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:19,001 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:19,002 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:19,002 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:02:19,015 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:19,025 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:19,037 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:19,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T23:02:19,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958139041 (current time:1733958139041). 
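[editor's note] The snapshot request logged above ({ ss=emptySnaptb0-... type=FLUSH ttl=0 }) is what the master receives when a client asks for a FLUSH-type snapshot. A hedged sketch of the Admin call that produces such a request; the blocking form returns only after the master-side SnapshotProcedure (pid=239 below) has completed, which is why the "Checking to see if procedure is done" polling appears in the following entries.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void take(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        // A FLUSH-type snapshot, as in the request logged above; the call blocks until the
        // SnapshotProcedure finishes on the master.
        admin.snapshot(new SnapshotDescription(
            "emptySnaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH));
      }
    }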
2024-12-11T23:02:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:02:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T23:02:19,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:02:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b4ba074, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:02:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:02:19,045 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:02:19,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:02:19,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:02:19,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35e975a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:02:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:02:19,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,048 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:02:19,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f01dd96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:02:19,050 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:02:19,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:19,051 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:02:19,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:02:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:02:19,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,059 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
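[editor's note] The call stacks above and below (SnapshotDescriptionUtils.isSecurityAvailable, then writeAclToSnapshotDescription reading "entry[testtb-...], kv [jenkins: RWXCA]") show the master copying the table's ACL into the snapshot description. A hedged sketch of how that table permission would be granted and read back from a client via AccessControlClient; user and table come from the log, everything else is illustrative.

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class GrantAndReadAcl {
      public static void grantAndList(Connection conn) throws Throwable {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        // Grant RWXCA on the table to 'jenkins', which is what the hbase:acl entry above encodes.
        AccessControlClient.grant(conn, table, "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
        // Read the permissions back, roughly what writeAclToSnapshotDescription does internally.
        List<UserPermission> perms =
            AccessControlClient.getUserPermissions(conn, table.getNameAsString());
        perms.forEach(p -> System.out.println(p));
      }
    }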
2024-12-11T23:02:19,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55afd7bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:02:19,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:02:19,077 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:02:19,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:02:19,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:02:19,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fce38db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:02:19,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:02:19,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,079 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:02:19,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@559014fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:02:19,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:02:19,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:19,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T23:02:19,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:02:19,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:19,089 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:02:19,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043. 2024-12-11T23:02:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T23:02:19,091 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:19,091 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:02:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T23:02:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T23:02:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T23:02:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-11T23:02:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:19,096 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:02:19,097 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:02:19,100 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:02:19,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742420_1596 (size=203) 2024-12-11T23:02:19,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742420_1596 (size=203) 2024-12-11T23:02:19,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742420_1596 (size=203) 2024-12-11T23:02:19,118 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:02:19,118 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a}] 2024-12-11T23:02:19,119 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:19,120 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 2024-12-11T23:02:19,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:19,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000002/launch_container.sh] 2024-12-11T23:02:19,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000002/container_tokens] 2024-12-11T23:02:19,227 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000002/sysfs] 2024-12-11T23:02:19,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-11T23:02:19,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 0e8295e1189a83034c0eff7fe9c9061a: 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 
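[editor's note] While the per-region SnapshotRegionProcedures run on the region servers above, the client keeps polling the master ("Checking to see if procedure is done pid=239"). A hedged sketch of the equivalent explicit wait a client could perform with Admin.isSnapshotFinished, e.g. after submitting the snapshot asynchronously; the backoff and method names around it are illustrative, since this test appears to use the blocking snapshot call instead.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class PollSnapshot {
      // Poll until the master reports the snapshot finished; comparable to the repeated
      // "Checking to see if procedure is done pid=239" calls in the log above.
      public static void waitForSnapshot(Admin admin, String name, TableName table) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(name, table, SnapshotType.FLUSH);
        while (!admin.isSnapshotFinished(desc)) {
          Thread.sleep(250); // illustrative backoff
        }
      }
    }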
2024-12-11T23:02:19,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for 883cb9411812424601639d54a1542515: 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:19,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:02:19,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T23:02:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:19,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:20,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742421_1597 (size=82) 2024-12-11T23:02:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742422_1598 (size=82) 2024-12-11T23:02:20,105 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-11T23:02:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:20,119 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-11T23:02:20,120 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T23:02:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742422_1598 (size=82) 2024-12-11T23:02:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742421_1597 (size=82) 2024-12-11T23:02:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742421_1597 (size=82) 2024-12-11T23:02:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742422_1598 (size=82) 2024-12-11T23:02:20,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:20,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-11T23:02:20,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
2024-12-11T23:02:20,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-11T23:02:20,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-11T23:02:20,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-11T23:02:20,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:20,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 883cb9411812424601639d54a1542515 2024-12-11T23:02:20,196 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:20,196 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 2024-12-11T23:02:20,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a in 1.0790 sec 2024-12-11T23:02:20,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-11T23:02:20,199 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:02:20,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 in 1.0790 sec 2024-12-11T23:02:20,200 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:02:20,201 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
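[editor's note] The entries that follow show the manifests being consolidated, verified, and moved out of .hbase-snapshot/.tmp into place; at that point the test's export step (not part of this excerpt) can run. Given the test name (testExportFileSystemStateWithSkipTmp), a hedged sketch of invoking the ExportSnapshot tool with the "skip tmp" behaviour enabled. Assumptions to note: the -snapshot/-copy-to arguments and the snapshot.export.skip.tmp property are the commonly documented ones but should be verified for your version, and the destination URI is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithSkipTmp {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: the "skip tmp" behaviour is driven by this property, so the export writes
        // directly into .hbase-snapshot on the target instead of a .tmp staging directory.
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://backup-cluster:8020/hbase" // placeholder destination
        });
        if (rc != 0) {
          throw new IllegalStateException("ExportSnapshot exited with " + rc);
        }
      }
    }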
2024-12-11T23:02:20,201 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:02:20,201 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:02:20,201 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T23:02:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742423_1599 (size=74) 2024-12-11T23:02:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742423_1599 (size=74) 2024-12-11T23:02:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742423_1599 (size=74) 2024-12-11T23:02:20,208 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:02:20,208 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:20,208 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:20,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:20,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742424_1600 (size=697) 2024-12-11T23:02:20,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742424_1600 (size=697) 2024-12-11T23:02:20,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742424_1600 (size=697) 2024-12-11T23:02:20,260 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:02:20,264 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:02:20,265 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:20,266 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:02:20,267 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-11T23:02:20,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 1.1750 sec 2024-12-11T23:02:21,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T23:02:21,245 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T23:02:21,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37563 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:02:21,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41635 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T23:02:21,259 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:21,262 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,262 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
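The entries just above show the test client writing rows to both regions with the WAL disabled and then scanning hbase:meta to enumerate the table's regions. A minimal, hypothetical Java sketch of those client-side steps follows; only the table and column-family names are taken from the log, while the row key, value, and connection configuration are illustrative and assume an hbase-site.xml on the classpath.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndLocateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster settings are on the classpath
    TableName tableName = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName)) {
      // Write a row without the WAL, matching the "writing data ... with WAL disabled" entries.
      Put put = new Put(Bytes.toBytes("row-0")) // illustrative row key, not from the log
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
      // Enumerate the table's regions, matching the "Found 2 regions for table" entry.
      List<HRegionLocation> regions = locator.getAllRegionLocations();
      System.out.println("regions: " + regions.size());
    }
  }
}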
2024-12-11T23:02:21,263 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T23:02:21,265 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:21,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:21,283 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T23:02:21,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T23:02:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733958141289 (current time:1733958141289). 2024-12-11T23:02:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T23:02:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T23:02:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T23:02:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a1fccb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:02:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:02:21,291 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:02:21,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:02:21,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:02:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6608239f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:02:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:02:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,293 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:02:21,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c4ce9d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:02:21,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:02:21,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:21,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54828, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:02:21,297 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:02:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T23:02:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,298 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:02:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@201b1526, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,42043,-1 for getting cluster id 2024-12-11T23:02:21,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T23:02:21,299 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a632e75d-555c-4227-9b6b-9c0477277eba' 2024-12-11T23:02:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T23:02:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a632e75d-555c-4227-9b6b-9c0477277eba" 2024-12-11T23:02:21,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b021da5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [b762025f20c5,42043,-1] 2024-12-11T23:02:21,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T23:02:21,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,301 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T23:02:21,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e08d219, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T23:02:21,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T23:02:21,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,41635,1733957771023, seqNum=-1] 2024-12-11T23:02:21,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:21,303 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:02:21,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., hostname=b762025f20c5,37563,1733957770731, seqNum=2] 2024-12-11T23:02:21,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T23:02:21,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T23:02:21,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043. 
2024-12-11T23:02:21,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T23:02:21,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:02:21,308 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:02:21,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T23:02:21,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
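At this point the master handler has validated the request, recorded the table ACL in the snapshot description, and is about to store SnapshotProcedure pid=242 for snaptb0-testExportFileSystemStateWithSkipTmp; further down, the same snapshot is copied out by ExportSnapshot with skipTmp=true. Below is a rough, non-authoritative sketch of how a client could drive both steps. The export destination URI is made up, and the snapshot.export.skip.tmp property name is an assumption inferred from the skipTmp=true value logged by ExportSnapshot, not something stated in this log.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class SnapshotAndExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String snapshotName = "snaptb0-testExportFileSystemStateWithSkipTmp";
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot of an enabled table; the log shows this arriving as type=FLUSH.
      admin.snapshot(snapshotName, TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
      // Sanity check: the snapshot should now be listed by the master.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      snapshots.forEach(sd -> System.out.println("snapshot: " + sd.getName()));
    }
    // Assumption: this property corresponds to the skipTmp=true behaviour in the ExportSnapshot entries below.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    // Roughly equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", snapshotName,
        "-copy-to", "hdfs://namenode:8020/export-test" // illustrative destination, not from the log
    });
    System.exit(rc);
  }
}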
2024-12-11T23:02:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T23:02:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-11T23:02:21,313 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T23:02:21,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T23:02:21,314 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T23:02:21,316 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T23:02:21,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742425_1601 (size=198) 2024-12-11T23:02:21,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742425_1601 (size=198) 2024-12-11T23:02:21,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742425_1601 (size=198) 2024-12-11T23:02:21,335 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T23:02:21,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a}] 2024-12-11T23:02:21,337 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 2024-12-11T23:02:21,337 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T23:02:21,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41635 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-11T23:02:21,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37563 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-11T23:02:21,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:21,495 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing 883cb9411812424601639d54a1542515 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T23:02:21,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:21,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing 0e8295e1189a83034c0eff7fe9c9061a 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T23:02:21,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a is 71, key is 15c578eb278fb29ce9f9686cceb10e81/cf:q/1733958141258/Put/seqid=0 2024-12-11T23:02:21,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 is 71, key is 0548f31acd97b9c88886753f993a80a1/cf:q/1733958141256/Put/seqid=0 2024-12-11T23:02:21,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742426_1602 (size=8101) 2024-12-11T23:02:21,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742426_1602 (size=8101) 2024-12-11T23:02:21,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742426_1602 (size=8101) 2024-12-11T23:02:21,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T23:02:21,574 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:21,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/.tmp/cf/c452e1e211014d29b946e2ff725c03f8, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=0e8295e1189a83034c0eff7fe9c9061a] 2024-12-11T23:02:21,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/.tmp/cf/c452e1e211014d29b946e2ff725c03f8 is 220, key is 17cd120419a42541043f1d10d7232ff4d/cf:q/1733958141258/Put/seqid=0 2024-12-11T23:02:21,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742427_1603 (size=5172) 2024-12-11T23:02:21,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742427_1603 (size=5172) 2024-12-11T23:02:21,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742427_1603 (size=5172) 2024-12-11T23:02:21,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:02:21,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 2024-12-11T23:02:21,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/.tmp/cf/f6f02a3d2268440ca177ff60b3a4932d, 
store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=883cb9411812424601639d54a1542515] 2024-12-11T23:02:21,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/.tmp/cf/f6f02a3d2268440ca177ff60b3a4932d is 220, key is 0237790f38e4f145ae78dc57c831632e6/cf:q/1733958141256/Put/seqid=0 2024-12-11T23:02:21,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742428_1604 (size=15309) 2024-12-11T23:02:21,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742428_1604 (size=15309) 2024-12-11T23:02:21,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742428_1604 (size=15309) 2024-12-11T23:02:21,589 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/.tmp/cf/c452e1e211014d29b946e2ff725c03f8 2024-12-11T23:02:21,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/.tmp/cf/c452e1e211014d29b946e2ff725c03f8 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8 2024-12-11T23:02:21,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742429_1605 (size=6178) 2024-12-11T23:02:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742429_1605 (size=6178) 2024-12-11T23:02:21,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742429_1605 (size=6178) 2024-12-11T23:02:21,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/.tmp/cf/f6f02a3d2268440ca177ff60b3a4932d 2024-12-11T23:02:21,601 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8, 
entries=46, sequenceid=6, filesize=15.0 K 2024-12-11T23:02:21,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 0e8295e1189a83034c0eff7fe9c9061a in 106ms, sequenceid=6, compaction requested=false 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 0e8295e1189a83034c0eff7fe9c9061a: 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8] hfiles 2024-12-11T23:02:21,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/.tmp/cf/f6f02a3d2268440ca177ff60b3a4932d as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d 2024-12-11T23:02:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742430_1606 (size=121) 2024-12-11T23:02:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742430_1606 (size=121) 2024-12-11T23:02:21,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742430_1606 (size=121) 2024-12-11T23:02:21,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:21,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-11T23:02:21,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-11T23:02:21,613 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:21,613 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:21,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d, entries=4, sequenceid=6, filesize=6.0 K 2024-12-11T23:02:21,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 883cb9411812424601639d54a1542515 in 119ms, sequenceid=6, compaction requested=false 2024-12-11T23:02:21,614 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a in 278 msec 2024-12-11T23:02:21,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 883cb9411812424601639d54a1542515: 2024-12-11T23:02:21,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T23:02:21,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T23:02:21,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d] hfiles 2024-12-11T23:02:21,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742431_1607 (size=121) 2024-12-11T23:02:21,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742431_1607 (size=121) 2024-12-11T23:02:21,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742431_1607 (size=121) 2024-12-11T23:02:21,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
2024-12-11T23:02:21,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-11T23:02:21,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-11T23:02:21,621 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 883cb9411812424601639d54a1542515 2024-12-11T23:02:21,621 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 2024-12-11T23:02:21,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-11T23:02:21,627 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T23:02:21,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 883cb9411812424601639d54a1542515 in 287 msec 2024-12-11T23:02:21,628 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T23:02:21,631 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T23:02:21,631 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T23:02:21,631 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T23:02:21,632 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515] hfiles 2024-12-11T23:02:21,632 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:21,632 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 2024-12-11T23:02:21,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T23:02:21,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742432_1608 (size=305) 2024-12-11T23:02:21,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742432_1608 (size=305) 2024-12-11T23:02:21,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742432_1608 (size=305) 2024-12-11T23:02:21,640 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T23:02:21,640 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,640 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to 
blk_1073742433_1609 (size=1007) 2024-12-11T23:02:21,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742433_1609 (size=1007) 2024-12-11T23:02:21,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742433_1609 (size=1007) 2024-12-11T23:02:21,663 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T23:02:21,674 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T23:02:21,674 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,676 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T23:02:21,676 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-11T23:02:21,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 365 msec 2024-12-11T23:02:21,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T23:02:21,946 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T23:02:21,946 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946 2024-12-11T23:02:21,946 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45807, tgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946, rawTgtDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946, 
srcFsUri=hdfs://localhost:45807, srcDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:02:21,978 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45807, inputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f 2024-12-11T23:02:21,978 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,979 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T23:02:21,983 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742435_1611 (size=198) 2024-12-11T23:02:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742434_1610 (size=1007) 2024-12-11T23:02:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742434_1610 (size=1007) 2024-12-11T23:02:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742435_1611 (size=198) 2024-12-11T23:02:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742434_1610 (size=1007) 2024-12-11T23:02:21,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742435_1611 (size=198) 2024-12-11T23:02:21,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:21,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:21,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0010_000001 (auth:SIMPLE) from 127.0.0.1:34054 2024-12-11T23:02:22,351 WARN 
[ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000001/launch_container.sh] 2024-12-11T23:02:22,351 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000001/container_tokens] 2024-12-11T23:02:22,351 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0010/container_1733957780200_0010_01_000001/sysfs] 2024-12-11T23:02:22,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-12244779678750791816.jar 2024-12-11T23:02:22,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop-3429539542790409677.jar 2024-12-11T23:02:22,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T23:02:22,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T23:02:22,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:02:22,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T23:02:22,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742436_1612 (size=131440) 2024-12-11T23:02:22,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742436_1612 (size=131440) 2024-12-11T23:02:22,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742436_1612 (size=131440) 2024-12-11T23:02:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742437_1613 (size=4188619) 2024-12-11T23:02:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41607 is added to blk_1073742437_1613 (size=4188619) 2024-12-11T23:02:22,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742437_1613 (size=4188619) 2024-12-11T23:02:22,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742438_1614 (size=1323991) 2024-12-11T23:02:22,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742438_1614 (size=1323991) 2024-12-11T23:02:22,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742438_1614 (size=1323991) 2024-12-11T23:02:22,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742439_1615 (size=903541) 2024-12-11T23:02:22,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742439_1615 (size=903541) 2024-12-11T23:02:22,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742439_1615 (size=903541) 2024-12-11T23:02:22,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742440_1616 (size=8360360) 2024-12-11T23:02:22,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742440_1616 (size=8360360) 2024-12-11T23:02:22,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742440_1616 (size=8360360) 2024-12-11T23:02:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742441_1617 (size=1877034) 2024-12-11T23:02:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742441_1617 (size=1877034) 2024-12-11T23:02:22,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742441_1617 (size=1877034) 2024-12-11T23:02:22,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742442_1618 (size=77835) 2024-12-11T23:02:22,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742442_1618 (size=77835) 2024-12-11T23:02:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742442_1618 (size=77835) 2024-12-11T23:02:23,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742443_1619 (size=30949) 2024-12-11T23:02:23,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742443_1619 (size=30949) 2024-12-11T23:02:23,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742443_1619 (size=30949) 2024-12-11T23:02:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41607 is added to blk_1073742444_1620 (size=1597213) 2024-12-11T23:02:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742444_1620 (size=1597213) 2024-12-11T23:02:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742444_1620 (size=1597213) 2024-12-11T23:02:23,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742445_1621 (size=443172) 2024-12-11T23:02:23,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742445_1621 (size=443172) 2024-12-11T23:02:23,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742445_1621 (size=443172) 2024-12-11T23:02:23,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742446_1622 (size=4695811) 2024-12-11T23:02:23,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742446_1622 (size=4695811) 2024-12-11T23:02:23,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742446_1622 (size=4695811) 2024-12-11T23:02:23,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742447_1623 (size=232957) 2024-12-11T23:02:23,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742447_1623 (size=232957) 2024-12-11T23:02:23,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742447_1623 (size=232957) 2024-12-11T23:02:23,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742448_1624 (size=127628) 2024-12-11T23:02:23,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742448_1624 (size=127628) 2024-12-11T23:02:23,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742448_1624 (size=127628) 2024-12-11T23:02:23,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742449_1625 (size=20406) 2024-12-11T23:02:23,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742449_1625 (size=20406) 2024-12-11T23:02:23,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742449_1625 (size=20406) 2024-12-11T23:02:23,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742450_1626 (size=5175431) 2024-12-11T23:02:23,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742450_1626 (size=5175431) 2024-12-11T23:02:23,095 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742450_1626 (size=5175431) 2024-12-11T23:02:23,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742451_1627 (size=217634) 2024-12-11T23:02:23,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742451_1627 (size=217634) 2024-12-11T23:02:23,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742451_1627 (size=217634) 2024-12-11T23:02:23,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742452_1628 (size=1832290) 2024-12-11T23:02:23,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742452_1628 (size=1832290) 2024-12-11T23:02:23,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742452_1628 (size=1832290) 2024-12-11T23:02:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742453_1629 (size=322274) 2024-12-11T23:02:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742453_1629 (size=322274) 2024-12-11T23:02:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742453_1629 (size=322274) 2024-12-11T23:02:23,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742454_1630 (size=503880) 2024-12-11T23:02:23,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742454_1630 (size=503880) 2024-12-11T23:02:23,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742454_1630 (size=503880) 2024-12-11T23:02:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742455_1631 (size=29229) 2024-12-11T23:02:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742455_1631 (size=29229) 2024-12-11T23:02:23,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742455_1631 (size=29229) 2024-12-11T23:02:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742456_1632 (size=24096) 2024-12-11T23:02:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742456_1632 (size=24096) 2024-12-11T23:02:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742456_1632 (size=24096) 2024-12-11T23:02:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742457_1633 (size=111872) 2024-12-11T23:02:23,169 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742457_1633 (size=111872) 2024-12-11T23:02:23,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742457_1633 (size=111872) 2024-12-11T23:02:23,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742458_1634 (size=6426222) 2024-12-11T23:02:23,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742458_1634 (size=6426222) 2024-12-11T23:02:23,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742458_1634 (size=6426222) 2024-12-11T23:02:23,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742459_1635 (size=45609) 2024-12-11T23:02:23,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742459_1635 (size=45609) 2024-12-11T23:02:23,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742459_1635 (size=45609) 2024-12-11T23:02:23,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742460_1636 (size=136454) 2024-12-11T23:02:23,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742460_1636 (size=136454) 2024-12-11T23:02:23,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742460_1636 (size=136454) 2024-12-11T23:02:23,213 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
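
Note: the mapreduce.TableMapReduceUtil(972) entries above record, for each class the export job references, which jar will be shipped with the job, and the JobResourceUploader(481) warning flags that no job jar was set. A minimal client-side sketch of that setup, assuming nothing beyond the public TableMapReduceUtil API (the job name and driver class below are illustrative, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch"); // illustrative job name
        // Setting a job jar is what avoids the "No job jar file set" warning seen above.
        job.setJarByClass(DependencyJarSetup.class);
        // Resolves the jar containing each dependency class and adds it to the job's
        // distributed-cache jar list, which is what emits the "For class X, using jar Y" lines.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
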
2024-12-11T23:02:23,215 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-11T23:02:23,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.0 K 2024-12-11T23:02:23,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-11T23:02:23,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-11T23:02:23,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T23:02:23,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742461_1637 (size=1079) 2024-12-11T23:02:23,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742461_1637 (size=1079) 2024-12-11T23:02:23,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742461_1637 (size=1079) 2024-12-11T23:02:23,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742462_1638 (size=35) 2024-12-11T23:02:23,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742462_1638 (size=35) 2024-12-11T23:02:23,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742462_1638 (size=35) 2024-12-11T23:02:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742463_1639 (size=304168) 2024-12-11T23:02:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742463_1639 (size=304168) 2024-12-11T23:02:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742463_1639 (size=304168) 2024-12-11T23:02:23,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T23:02:23,263 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T23:02:23,280 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:59508 2024-12-11T23:02:23,521 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:02:27,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:35400 2024-12-11T23:02:27,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742464_1640 (size=349890) 2024-12-11T23:02:27,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742464_1640 (size=349890) 2024-12-11T23:02:27,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742464_1640 (size=349890) 2024-12-11T23:02:29,615 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:53550 2024-12-11T23:02:29,615 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:38476 2024-12-11T23:02:30,503 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:53560 2024-12-11T23:02:30,506 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:38478 2024-12-11T23:02:33,346 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733957780200_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T23:02:34,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742465_1641 (size=15309) 2024-12-11T23:02:34,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742465_1641 (size=15309) 2024-12-11T23:02:34,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742465_1641 (size=15309) 2024-12-11T23:02:34,348 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000002/launch_container.sh] 2024-12-11T23:02:34,348 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000002/container_tokens] 2024-12-11T23:02:34,348 WARN [ContainersLauncher #3 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000002/sysfs] 2024-12-11T23:02:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742467_1643 (size=8101) 2024-12-11T23:02:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742467_1643 (size=8101) 2024-12-11T23:02:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742467_1643 (size=8101) 2024-12-11T23:02:35,205 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000003/launch_container.sh] 2024-12-11T23:02:35,205 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000003/container_tokens] 2024-12-11T23:02:35,205 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_1/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000003/sysfs] 2024-12-11T23:02:35,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742468_1644 (size=6178) 2024-12-11T23:02:35,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742468_1644 (size=6178) 2024-12-11T23:02:35,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742468_1644 (size=6178) 2024-12-11T23:02:35,789 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000004/launch_container.sh] 2024-12-11T23:02:35,789 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000004/container_tokens] 2024-12-11T23:02:35,789 WARN 
[ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_3/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000004/sysfs] 2024-12-11T23:02:35,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742469_1645 (size=5172) 2024-12-11T23:02:35,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742469_1645 (size=5172) 2024-12-11T23:02:35,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742469_1645 (size=5172) 2024-12-11T23:02:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742466_1642 (size=31803) 2024-12-11T23:02:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742466_1642 (size=31803) 2024-12-11T23:02:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742466_1642 (size=31803) 2024-12-11T23:02:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742470_1646 (size=477) 2024-12-11T23:02:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742470_1646 (size=477) 2024-12-11T23:02:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742470_1646 (size=477) 2024-12-11T23:02:35,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742471_1647 (size=31803) 2024-12-11T23:02:35,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742471_1647 (size=31803) 2024-12-11T23:02:35,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742471_1647 (size=31803) 2024-12-11T23:02:35,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742472_1648 (size=349890) 2024-12-11T23:02:35,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742472_1648 (size=349890) 2024-12-11T23:02:35,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742472_1648 (size=349890) 2024-12-11T23:02:36,001 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:53562 2024-12-11T23:02:36,010 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733957780200_0011_000001 (auth:SIMPLE) from 127.0.0.1:38488 2024-12-11T23:02:36,022 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000005/launch_container.sh] 2024-12-11T23:02:36,022 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000005/container_tokens] 2024-12-11T23:02:36,022 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-0_2/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000005/sysfs] 2024-12-11T23:02:37,644 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T23:02:37,644 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T23:02:37,650 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,650 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T23:02:37,650 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T23:02:37,650 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,651 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T23:02:37,651 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T23:02:37,651 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-2117180003_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,651 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T23:02:37,651 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/export-test/export-1733958141946/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T23:02:37,656 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T23:02:37,658 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958157658"}]},"ts":"1733958157658"} 2024-12-11T23:02:37,659 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-11T23:02:37,659 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-11T23:02:37,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-11T23:02:37,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, UNASSIGN}] 2024-12-11T23:02:37,661 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, UNASSIGN 2024-12-11T23:02:37,661 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, UNASSIGN 2024-12-11T23:02:37,662 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=883cb9411812424601639d54a1542515, regionState=CLOSING, regionLocation=b762025f20c5,37563,1733957770731 2024-12-11T23:02:37,662 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=0e8295e1189a83034c0eff7fe9c9061a, regionState=CLOSING, regionLocation=b762025f20c5,41635,1733957771023 2024-12-11T23:02:37,663 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, UNASSIGN because future has completed 2024-12-11T23:02:37,663 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:02:37,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731}] 2024-12-11T23:02:37,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, UNASSIGN because future has completed 2024-12-11T23:02:37,664 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T23:02:37,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023}] 2024-12-11T23:02:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T23:02:37,816 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close 883cb9411812424601639d54a1542515 2024-12-11T23:02:37,816 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:02:37,817 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing 883cb9411812424601639d54a1542515, disabling compactions & flushes 2024-12-11T23:02:37,817 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:37,817 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 2024-12-11T23:02:37,817 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. after waiting 0 ms 2024-12-11T23:02:37,817 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
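
Note: the snapshot.ExportSnapshot entries above (hfile list loading at 663, per-split sizing at 763, "Finalize the Snapshot Export", the expiration/integrity verification, and "Export Completed") come from the ExportSnapshot tool that the test drives. A rough sketch of invoking it through ToolRunner, assuming ExportSnapshot can be run as a Hadoop Tool, that "snapshot.export.skiptmp" is the property behind the SkipTmp variant, and with an illustrative destination URI:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: this property selects the "skip tmp dir" export path exercised by the test.
        conf.setBoolean("snapshot.export.skiptmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:45807/user/jenkins/export-target" // illustrative path
        });
        System.exit(rc);
      }
    }
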
2024-12-11T23:02:37,817 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:37,817 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T23:02:37,818 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 0e8295e1189a83034c0eff7fe9c9061a, disabling compactions & flushes 2024-12-11T23:02:37,818 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:37,818 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:37,818 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. after waiting 0 ms 2024-12-11T23:02:37,818 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:37,825 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:02:37,826 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:02:37,826 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515. 
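
Note: the DisableTableProcedure chain above (pid=245, its CloseTableRegionsProcedure and TransitRegionStateProcedure children, and the CloseRegionProcedure work on the region servers) is the master-side handling of a client disable request. A minimal sketch of the client side, using only the standard Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's DisableTableProcedure and its region-close subprocedures finish.
          admin.disableTable(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }
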
2024-12-11T23:02:37,826 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for 883cb9411812424601639d54a1542515: Waiting for close lock at 1733958157816Running coprocessor pre-close hooks at 1733958157816Disabling compacts and flushes for region at 1733958157817 (+1 ms)Disabling writes for close at 1733958157817Writing region close event to WAL at 1733958157819 (+2 ms)Running coprocessor post-close hooks at 1733958157826 (+7 ms)Closed at 1733958157826 2024-12-11T23:02:37,826 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T23:02:37,827 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:02:37,827 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a. 2024-12-11T23:02:37,827 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 0e8295e1189a83034c0eff7fe9c9061a: Waiting for close lock at 1733958157817Running coprocessor pre-close hooks at 1733958157817Disabling compacts and flushes for region at 1733958157818 (+1 ms)Disabling writes for close at 1733958157818Writing region close event to WAL at 1733958157820 (+2 ms)Running coprocessor post-close hooks at 1733958157827 (+7 ms)Closed at 1733958157827 2024-12-11T23:02:37,827 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed 883cb9411812424601639d54a1542515 2024-12-11T23:02:37,828 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=883cb9411812424601639d54a1542515, regionState=CLOSED 2024-12-11T23:02:37,828 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:37,828 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=0e8295e1189a83034c0eff7fe9c9061a, regionState=CLOSED 2024-12-11T23:02:37,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731 because future has completed 2024-12-11T23:02:37,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023 because future has completed 2024-12-11T23:02:37,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=247 2024-12-11T23:02:37,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=247, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 883cb9411812424601639d54a1542515, server=b762025f20c5,37563,1733957770731 in 166 msec 2024-12-11T23:02:37,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=248 2024-12-11T23:02:37,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure 0e8295e1189a83034c0eff7fe9c9061a, server=b762025f20c5,41635,1733957771023 in 166 msec 2024-12-11T23:02:37,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=883cb9411812424601639d54a1542515, UNASSIGN in 169 msec 2024-12-11T23:02:37,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=248, resume processing ppid=246 2024-12-11T23:02:37,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0e8295e1189a83034c0eff7fe9c9061a, UNASSIGN in 170 msec 2024-12-11T23:02:37,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-11T23:02:37,833 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 172 msec 2024-12-11T23:02:37,834 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733958157834"}]},"ts":"1733958157834"} 2024-12-11T23:02:37,835 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-11T23:02:37,835 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-11T23:02:37,837 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 180 msec 2024-12-11T23:02:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T23:02:37,976 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T23:02:37,977 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,979 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,980 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,981 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,985 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37563 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:37,986 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515 2024-12-11T23:02:37,986 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:37,988 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/recovered.edits] 2024-12-11T23:02:37,988 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf, FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/recovered.edits] 2024-12-11T23:02:38,237 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/cf/c452e1e211014d29b946e2ff725c03f8 2024-12-11T23:02:38,237 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/cf/f6f02a3d2268440ca177ff60b3a4932d 2024-12-11T23:02:38,240 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/recovered.edits/9.seqid to 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a/recovered.edits/9.seqid 2024-12-11T23:02:38,240 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/recovered.edits/9.seqid to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515/recovered.edits/9.seqid 2024-12-11T23:02:38,241 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:38,241 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testtb-testExportFileSystemStateWithSkipTmp/883cb9411812424601639d54a1542515 2024-12-11T23:02:38,241 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-11T23:02:38,241 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-11T23:02:38,242 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-11T23:02:38,325 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241211662c5a67068041b1a3392f20332bbac8_0e8295e1189a83034c0eff7fe9c9061a 2024-12-11T23:02:38,325 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 to hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412111e4c223eee2e4a8189fe0d4e24fa5ef2_883cb9411812424601639d54a1542515 2024-12-11T23:02:38,326 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-11T23:02:38,331 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,335 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-11T23:02:38,338 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-11T23:02:38,339 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,339 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-11T23:02:38,339 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958158339"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:38,339 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733958158339"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:38,341 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T23:02:38,341 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 883cb9411812424601639d54a1542515, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733958138358.883cb9411812424601639d54a1542515.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0e8295e1189a83034c0eff7fe9c9061a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T23:02:38,341 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
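
Note: with the table disabled, the DeleteTableProcedure entries above archive the region and MOB HFiles, remove the region rows from hbase:meta, and drop the table descriptor; the snapshot deletion requested below (master.MasterRpcServices(838)) is a separate Admin call. A minimal sketch of the client-side sequence, assuming only the standard Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // Drives DeleteTableProcedure: archive HFiles, clean hbase:meta, remove ACL entries.
          admin.deleteTable(table);
          // Snapshots are deleted independently of the table.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
        }
      }
    }
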
2024-12-11T23:02:38,341 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733958158341"}]},"ts":"9223372036854775807"} 2024-12-11T23:02:38,342 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-11T23:02:38,343 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,344 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 366 msec 2024-12-11T23:02:38,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T23:02:38,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T23:02:38,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T23:02:38,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:38,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T23:02:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-11T23:02:38,480 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:38,480 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:38,481 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:38,481 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,481 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T23:02:38,481 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T23:02:38,485 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T23:02:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,487 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T23:02:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:38,505 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=822 (was 817) Potentially hanging thread: Thread-9970 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:51720 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:54026 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46105 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2117180003_22 at /127.0.0.1:60928 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:46105 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1218756954_1 at /127.0.0.1:51696 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 151050) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1218756954_1 at /127.0.0.1:54004 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=787 (was 797), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=694 (was 759), ProcessCount=22 (was 28), AvailableMemoryMB=6776 (was 6672) - AvailableMemoryMB LEAK? - 2024-12-11T23:02:38,505 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=822 is superior to 500 2024-12-11T23:02:38,506 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
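
The ResourceChecker block above is a before/after census of live threads ("Thread=822 (was 817)") together with the stack of each thread it considers potentially hanging. When chasing that kind of leak outside the test harness, a plain JDK snapshot gives a similar view; a minimal sketch using only java.lang APIs:

    import java.util.Map;

    public class ThreadSnapshot {
      public static void main(String[] args) {
        // Walk every live thread and print its state and current stack,
        // roughly the raw data behind the ResourceChecker report above.
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        System.out.println("Live threads: " + stacks.size());
        stacks.forEach((thread, frames) -> {
          System.out.println(thread.getName() + " (" + thread.getState() + ")");
          for (StackTraceElement frame : frames) {
            System.out.println("    at " + frame);
          }
        });
      }
    }
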
2024-12-11T23:02:38,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e88e382{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T23:02:38,514 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f0170a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:02:38,514 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:02:38,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e557003{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T23:02:38,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6efd04a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:02:38,525 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733957780200_0011_01_000001 is : 143 2024-12-11T23:02:38,534 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000001/launch_container.sh] 2024-12-11T23:02:38,534 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000001/container_tokens] 2024-12-11T23:02:38,534 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_481307181/yarn-3415068858/MiniMRCluster_481307181-localDir-nm-1_0/usercache/jenkins/appcache/application_1733957780200_0011/container_1733957780200_0011_01_000001/sysfs] 2024-12-11T23:02:38,686 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T23:02:40,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T23:02:43,352 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:02:55,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d2db3c7{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T23:02:55,542 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bf46fce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:02:55,542 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:02:55,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d9273c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T23:02:55,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@647f66d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:02:59,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 43379 2024-12-11T23:03:01,094 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83c7242f4212a8a5171da84f96da9bc6, had cached 0 bytes from a total of 5791 2024-12-11T23:03:02,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 189b562ad19e67a2b978aec9d57dad01, had cached 0 bytes from a total of 14463 2024-12-11T23:03:02,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f2c62d44aedcf51b4978173adc0395ae, had cached 0 bytes from a total of 6088 2024-12-11T23:03:08,687 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T23:03:12,555 ERROR [Thread[Thread-395,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T23:03:12,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@429c4453{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T23:03:12,556 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73ea0d15{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:03:12,556 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:03:12,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45c2a887{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T23:03:12,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c8c1793{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:03:12,560 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-11T23:03:12,565 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-11T23:03:12,565 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-11T23:03:12,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741830_1006 (size=1172726) 2024-12-11T23:03:12,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741830_1006 (size=1172726) 2024-12-11T23:03:12,568 ERROR [Thread[Thread-418,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T23:03:12,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cbd050f{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T23:03:12,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7dfd7ecf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:03:12,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:03:12,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@5512af0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T23:03:12,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@176bba66{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:03:12,573 ERROR [Thread[Thread-377,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T23:03:12,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-11T23:03:12,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T23:03:12,573 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T23:03:12,573 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:03:12,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,574 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting 
down HBase Cluster 2024-12-11T23:03:12,574 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T23:03:12,574 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1960546110, stopped=false 2024-12-11T23:03:12,574 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,574 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T23:03:12,574 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b762025f20c5,42043,1733957769583 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:03:12,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:03:12,624 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:03:12,625 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
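
The AsyncConnectionImpl call stacks above show that this shutdown is driven from the test's class-level teardown (TestExportSnapshot.tearDownAfterClass -> HBaseTestingUtil.shutdownMiniCluster), following the "Stopping mini mapreduce cluster" step logged earlier. A sketch of what such a teardown typically looks like; the TEST_UTIL field and the shutdownMiniMapReduceCluster() helper are assumptions inferred from the messages and stack traces in this log, not copied from the real test:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;

    public class ExampleSnapshotTestTeardown {
      // Assumed shared utility, started in a matching @BeforeClass (not shown).
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        TEST_UTIL.shutdownMiniMapReduceCluster(); // "Stopping mini mapreduce cluster..."
        TEST_UTIL.shutdownMiniCluster();          // "Shutting down minicluster": HBase, then HDFS
      }
    }
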
2024-12-11T23:03:12,625 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:03:12,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:03:12,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:03:12,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,37563,1733957770731' ***** 2024-12-11T23:03:12,627 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,627 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:03:12,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:03:12,627 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,33867,1733957770938' ***** 2024-12-11T23:03:12,627 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,627 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:03:12,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T23:03:12,627 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,41635,1733957771023' ***** 2024-12-11T23:03:12,627 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,627 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T23:03:12,628 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:03:12,628 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:03:12,628 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T23:03:12,628 INFO [RS:0;b762025f20c5:37563 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:03:12,628 INFO [RS:2;b762025f20c5:41635 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:03:12,628 INFO [RS:1;b762025f20c5:33867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T23:03:12,628 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:03:12,628 INFO [RS:1;b762025f20c5:33867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:03:12,628 INFO [RS:0;b762025f20c5:37563 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:03:12,628 INFO [RS:2;b762025f20c5:41635 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T23:03:12,628 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,41635,1733957771023 2024-12-11T23:03:12,629 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:03:12,629 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(3091): Received CLOSE for 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:03:12,629 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(3091): Received CLOSE for f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b762025f20c5:41635. 
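
The region servers above begin stopping right after the NodeDeleted event for /hbase/running a few entries earlier; that znode acts as the cluster-up flag the ZKWatcher instances are watching, and each server re-arms its watch once the node is gone. The same watch pattern with the plain org.apache.zookeeper client looks roughly like this (a generic sketch, not HBase's internal ZKWatcher; the quorum address is the one appearing in this log):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatcher {
      public static void main(String[] args) throws Exception {
        final String path = "/hbase/running";
        final CountDownLatch deleted = new CountDownLatch(1);

        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && path.equals(event.getPath())) {
            deleted.countDown(); // the same signal the region servers react to above
          }
        };

        ZooKeeper zk = new ZooKeeper("127.0.0.1:50092", 30_000, watcher);
        zk.exists(path, watcher); // one-shot watch; fires NodeDeleted when the znode is removed
        deleted.await();
        System.out.println("cluster-up znode removed, shutting down");
        zk.close();
      }
    }

ZooKeeper watches are one-shot, which is why the log shows each ZKWatcher immediately setting a new watch "on znode that does not yet exist" after the delete fires.
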
2024-12-11T23:03:12,629 DEBUG [RS:2;b762025f20c5:41635 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:03:12,629 DEBUG [RS:2;b762025f20c5:41635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,629 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,33867,1733957770938 2024-12-11T23:03:12,629 INFO [RS:1;b762025f20c5:33867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:03:12,629 INFO [RS:1;b762025f20c5:33867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b762025f20c5:33867. 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T23:03:12,629 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(3091): Received CLOSE for 83c7242f4212a8a5171da84f96da9bc6 2024-12-11T23:03:12,629 INFO [RS:2;b762025f20c5:41635 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T23:03:12,629 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,37563,1733957770731 2024-12-11T23:03:12,629 INFO [RS:0;b762025f20c5:37563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:03:12,629 DEBUG [RS:1;b762025f20c5:33867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:03:12,630 INFO [RS:0;b762025f20c5:37563 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b762025f20c5:37563. 
2024-12-11T23:03:12,630 DEBUG [RS:1;b762025f20c5:33867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,630 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T23:03:12,630 DEBUG [RS:0;b762025f20c5:37563 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:03:12,630 DEBUG [RS:0;b762025f20c5:37563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,630 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T23:03:12,630 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:03:12,630 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1325): Online Regions={189b562ad19e67a2b978aec9d57dad01=testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01.} 2024-12-11T23:03:12,630 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1325): Online Regions={f2c62d44aedcf51b4978173adc0395ae=testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae., 83c7242f4212a8a5171da84f96da9bc6=hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6.} 2024-12-11T23:03:12,630 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T23:03:12,630 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 189b562ad19e67a2b978aec9d57dad01, disabling compactions & flushes 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f2c62d44aedcf51b4978173adc0395ae, disabling compactions & flushes 2024-12-11T23:03:12,630 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 
2024-12-11T23:03:12,630 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. after waiting 0 ms 2024-12-11T23:03:12,630 DEBUG [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1351): Waiting on 189b562ad19e67a2b978aec9d57dad01 2024-12-11T23:03:12,630 DEBUG [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T23:03:12,630 DEBUG [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1351): Waiting on 83c7242f4212a8a5171da84f96da9bc6, f2c62d44aedcf51b4978173adc0395ae 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. after waiting 0 ms 2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 
2024-12-11T23:03:12,630 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T23:03:12,630 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T23:03:12,631 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T23:03:12,631 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T23:03:12,631 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T23:03:12,631 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=8.76 KB heapSize=14.76 KB 2024-12-11T23:03:12,645 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/f2c62d44aedcf51b4978173adc0395ae/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:03:12,645 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/default/testExportExpiredSnapshot/189b562ad19e67a2b978aec9d57dad01/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,646 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:03:12,646 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 
2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 189b562ad19e67a2b978aec9d57dad01: Waiting for close lock at 1733958192630Running coprocessor pre-close hooks at 1733958192630Disabling compacts and flushes for region at 1733958192630Disabling writes for close at 1733958192630Writing region close event to WAL at 1733958192631 (+1 ms)Running coprocessor post-close hooks at 1733958192646 (+15 ms)Closed at 1733958192646 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f2c62d44aedcf51b4978173adc0395ae: Waiting for close lock at 1733958192630Running coprocessor pre-close hooks at 1733958192630Disabling compacts and flushes for region at 1733958192630Disabling writes for close at 1733958192630Writing region close event to WAL at 1733958192631 (+1 ms)Running coprocessor post-close hooks at 1733958192646 (+15 ms)Closed at 1733958192646 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733958047266.f2c62d44aedcf51b4978173adc0395ae. 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733958047266.189b562ad19e67a2b978aec9d57dad01. 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 83c7242f4212a8a5171da84f96da9bc6, disabling compactions & flushes 2024-12-11T23:03:12,646 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. after waiting 0 ms 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 
2024-12-11T23:03:12,646 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 83c7242f4212a8a5171da84f96da9bc6 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-11T23:03:12,646 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/b0aa5e9783c64111ad1548a78dff8f34 is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a./info:/1733958158339/DeleteFamily/seqid=0 2024-12-11T23:03:12,647 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,647 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,649 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/915263333e7143c5856a510eb6015d32 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733958157981/DeleteFamily/seqid=0 2024-12-11T23:03:12,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742473_1649 (size=6387) 2024-12-11T23:03:12,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742473_1649 (size=6387) 2024-12-11T23:03:12,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742473_1649 (size=6387) 2024-12-11T23:03:12,651 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.91 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/b0aa5e9783c64111ad1548a78dff8f34 2024-12-11T23:03:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742474_1650 (size=5142) 2024-12-11T23:03:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742474_1650 (size=5142) 2024-12-11T23:03:12,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742474_1650 (size=5142) 2024-12-11T23:03:12,654 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=34 (bloomFilter=false), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/915263333e7143c5856a510eb6015d32 2024-12-11T23:03:12,656 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,658 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 915263333e7143c5856a510eb6015d32 
2024-12-11T23:03:12,658 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/.tmp/l/915263333e7143c5856a510eb6015d32 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/l/915263333e7143c5856a510eb6015d32 2024-12-11T23:03:12,659 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/2f55d5dbf9dc40a0a925bca19ccf8bf0 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a./ns:/1733958158331/DeleteFamily/seqid=0 2024-12-11T23:03:12,660 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T23:03:12,660 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T23:03:12,663 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 915263333e7143c5856a510eb6015d32 2024-12-11T23:03:12,663 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/l/915263333e7143c5856a510eb6015d32, entries=2, sequenceid=34, filesize=5.0 K 2024-12-11T23:03:12,664 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 83c7242f4212a8a5171da84f96da9bc6 in 18ms, sequenceid=34, compaction requested=false 2024-12-11T23:03:12,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742475_1651 (size=5927) 2024-12-11T23:03:12,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742475_1651 (size=5927) 2024-12-11T23:03:12,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742475_1651 (size=5927) 2024-12-11T23:03:12,665 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/2f55d5dbf9dc40a0a925bca19ccf8bf0 2024-12-11T23:03:12,667 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/acl/83c7242f4212a8a5171da84f96da9bc6/recovered.edits/37.seqid, newMaxSeqId=37, maxSeqId=1 2024-12-11T23:03:12,668 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,668 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 2024-12-11T23:03:12,668 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 83c7242f4212a8a5171da84f96da9bc6: Waiting for close lock at 1733958192646Running coprocessor pre-close hooks at 1733958192646Disabling compacts and flushes for region at 1733958192646Disabling writes for close at 1733958192646Obtaining lock to block concurrent updates at 1733958192646Preparing flush snapshotting stores in 83c7242f4212a8a5171da84f96da9bc6 at 1733958192646Finished memstore snapshotting hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733958192646Flushing stores of hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. at 1733958192647 (+1 ms)Flushing 83c7242f4212a8a5171da84f96da9bc6/l: creating writer at 1733958192647Flushing 83c7242f4212a8a5171da84f96da9bc6/l: appending metadata at 1733958192649 (+2 ms)Flushing 83c7242f4212a8a5171da84f96da9bc6/l: closing flushed file at 1733958192649Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ac76d88: reopening flushed file at 1733958192658 (+9 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 83c7242f4212a8a5171da84f96da9bc6 in 18ms, sequenceid=34, compaction requested=false at 1733958192664 (+6 ms)Writing region close event to WAL at 1733958192665 (+1 ms)Running coprocessor post-close hooks at 1733958192668 (+3 ms)Closed at 1733958192668 2024-12-11T23:03:12,668 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733957775423.83c7242f4212a8a5171da84f96da9bc6. 
2024-12-11T23:03:12,671 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/690eb44d530743c787859bba59797f7f is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a./rep_barrier:/1733958158331/DeleteFamily/seqid=0 2024-12-11T23:03:12,672 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T23:03:12,672 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T23:03:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742476_1652 (size=5990) 2024-12-11T23:03:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742476_1652 (size=5990) 2024-12-11T23:03:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742476_1652 (size=5990) 2024-12-11T23:03:12,676 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/690eb44d530743c787859bba59797f7f 2024-12-11T23:03:12,682 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/7d862b3d479c4014b1e101b4e53e926e is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733958138358.0e8295e1189a83034c0eff7fe9c9061a./table:/1733958158331/DeleteFamily/seqid=0 2024-12-11T23:03:12,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742477_1653 (size=6012) 2024-12-11T23:03:12,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742477_1653 (size=6012) 2024-12-11T23:03:12,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742477_1653 (size=6012) 2024-12-11T23:03:12,686 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/7d862b3d479c4014b1e101b4e53e926e 2024-12-11T23:03:12,687 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T23:03:12,687 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T23:03:12,690 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/info/b0aa5e9783c64111ad1548a78dff8f34 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/info/b0aa5e9783c64111ad1548a78dff8f34 2024-12-11T23:03:12,694 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/info/b0aa5e9783c64111ad1548a78dff8f34, entries=8, sequenceid=243, filesize=6.2 K 2024-12-11T23:03:12,695 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/ns/2f55d5dbf9dc40a0a925bca19ccf8bf0 as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/ns/2f55d5dbf9dc40a0a925bca19ccf8bf0 2024-12-11T23:03:12,698 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/ns/2f55d5dbf9dc40a0a925bca19ccf8bf0, entries=4, sequenceid=243, filesize=5.8 K 2024-12-11T23:03:12,698 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/rep_barrier/690eb44d530743c787859bba59797f7f as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/rep_barrier/690eb44d530743c787859bba59797f7f 2024-12-11T23:03:12,702 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/rep_barrier/690eb44d530743c787859bba59797f7f, entries=4, sequenceid=243, filesize=5.8 K 2024-12-11T23:03:12,702 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/.tmp/table/7d862b3d479c4014b1e101b4e53e926e as hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/table/7d862b3d479c4014b1e101b4e53e926e 2024-12-11T23:03:12,706 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/table/7d862b3d479c4014b1e101b4e53e926e, entries=6, sequenceid=243, filesize=5.9 K 2024-12-11T23:03:12,707 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=243, compaction requested=false 2024-12-11T23:03:12,710 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/data/hbase/meta/1588230740/recovered.edits/246.seqid, newMaxSeqId=246, maxSeqId=1 2024-12-11T23:03:12,710 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:12,710 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T23:03:12,710 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T23:03:12,711 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733958192630Running coprocessor pre-close hooks at 1733958192630Disabling compacts and flushes for region at 1733958192630Disabling writes for close at 1733958192631 (+1 ms)Obtaining lock to block concurrent updates at 1733958192631Preparing flush snapshotting stores in 1588230740 at 1733958192631Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=8966, getHeapSize=15048, getOffHeapSize=0, getCellsCount=68 at 1733958192631Flushing stores of hbase:meta,,1.1588230740 at 1733958192632 (+1 ms)Flushing 1588230740/info: creating writer at 1733958192632Flushing 1588230740/info: appending metadata at 1733958192646 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733958192646Flushing 1588230740/ns: creating writer at 1733958192657 (+11 ms)Flushing 1588230740/ns: appending metadata at 1733958192659 (+2 ms)Flushing 1588230740/ns: closing flushed file at 1733958192659Flushing 1588230740/rep_barrier: creating writer at 1733958192669 (+10 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733958192671 (+2 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733958192671Flushing 1588230740/table: creating writer at 1733958192679 (+8 ms)Flushing 1588230740/table: appending metadata at 1733958192681 (+2 ms)Flushing 1588230740/table: closing flushed file at 1733958192681Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48f1f73d: reopening flushed file at 1733958192690 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15b49583: reopening flushed file at 1733958192694 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12343a3c: reopening flushed file at 1733958192698 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31be0ddb: reopening flushed file at 1733958192702 (+4 ms)Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=243, compaction requested=false at 1733958192707 (+5 ms)Writing region close event to WAL at 1733958192708 (+1 ms)Running coprocessor post-close hooks at 1733958192710 (+2 ms)Closed at 1733958192710 2024-12-11T23:03:12,711 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T23:03:12,831 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,33867,1733957770938; all regions closed. 
2024-12-11T23:03:12,831 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,37563,1733957770731; all regions closed. 2024-12-11T23:03:12,831 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,41635,1733957771023; all regions closed. 2024-12-11T23:03:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741833_1009 (size=15195) 2024-12-11T23:03:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741836_1012 (size=102506) 2024-12-11T23:03:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741833_1009 (size=15195) 2024-12-11T23:03:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741836_1012 (size=102506) 2024-12-11T23:03:12,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741836_1012 (size=102506) 2024-12-11T23:03:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741835_1011 (size=16624) 2024-12-11T23:03:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741835_1011 (size=16624) 2024-12-11T23:03:12,839 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/WALs/b762025f20c5,37563,1733957770731/b762025f20c5%2C37563%2C1733957770731.1733957773898 not finished, retry = 0 2024-12-11T23:03:12,842 DEBUG [RS:1;b762025f20c5:33867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs 2024-12-11T23:03:12,842 DEBUG [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C33867%2C1733957770938:(num 1733957773887) 2024-12-11T23:03:12,842 INFO [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C41635%2C1733957771023.meta:.meta(num 1733957774676) 2024-12-11T23:03:12,842 DEBUG [RS:1;b762025f20c5:33867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:03:12,842 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:03:12,842 INFO [RS:1;b762025f20c5:33867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:03:12,843 INFO [RS:1;b762025f20c5:33867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33867 2024-12-11T23:03:12,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073741834_1010 (size=12451) 2024-12-11T23:03:12,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073741834_1010 (size=12451) 2024-12-11T23:03:12,846 DEBUG [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs 2024-12-11T23:03:12,846 INFO [RS:2;b762025f20c5:41635 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C41635%2C1733957771023:(num 1733957773882) 2024-12-11T23:03:12,846 DEBUG [RS:2;b762025f20c5:41635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,846 INFO [RS:2;b762025f20c5:41635 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,846 INFO [RS:2;b762025f20c5:41635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:03:12,846 INFO [RS:2;b762025f20c5:41635 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T23:03:12,846 INFO [RS:2;b762025f20c5:41635 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:03:12,846 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T23:03:12,847 INFO [RS:2;b762025f20c5:41635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41635 2024-12-11T23:03:12,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:03:12,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,33867,1733957770938 2024-12-11T23:03:12,854 INFO [RS:1;b762025f20c5:33867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:03:12,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,41635,1733957771023 2024-12-11T23:03:12,862 INFO [RS:2;b762025f20c5:41635 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:03:12,871 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,33867,1733957770938] 2024-12-11T23:03:12,890 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,33867,1733957770938 already deleted, retry=false 2024-12-11T23:03:12,890 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,33867,1733957770938 expired; onlineServers=2 2024-12-11T23:03:12,890 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,41635,1733957771023] 2024-12-11T23:03:12,898 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,41635,1733957771023 already deleted, retry=false 2024-12-11T23:03:12,898 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,41635,1733957771023 expired; onlineServers=1 2024-12-11T23:03:12,943 DEBUG [RS:0;b762025f20c5:37563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/oldWALs 2024-12-11T23:03:12,944 INFO [RS:0;b762025f20c5:37563 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C37563%2C1733957770731:(num 1733957773898) 2024-12-11T23:03:12,944 DEBUG [RS:0;b762025f20c5:37563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T23:03:12,944 INFO [RS:0;b762025f20c5:37563 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T23:03:12,944 INFO [RS:0;b762025f20c5:37563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:03:12,944 INFO [RS:0;b762025f20c5:37563 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T23:03:12,944 INFO [RS:0;b762025f20c5:37563 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T23:03:12,945 INFO [RS:0;b762025f20c5:37563 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-11T23:03:12,945 INFO [RS:0;b762025f20c5:37563 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T23:03:12,945 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:03:12,945 INFO [RS:0;b762025f20c5:37563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:03:12,945 INFO [RS:0;b762025f20c5:37563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37563 2024-12-11T23:03:12,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,37563,1733957770731 2024-12-11T23:03:12,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T23:03:12,957 INFO [RS:0;b762025f20c5:37563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:03:12,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,37563,1733957770731] 2024-12-11T23:03:12,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:12,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33867-0x100cb8db5a40002, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:12,973 INFO [RS:1;b762025f20c5:33867 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:03:12,973 INFO [RS:1;b762025f20c5:33867 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,33867,1733957770938; zookeeper connection closed. 2024-12-11T23:03:12,973 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,37563,1733957770731 already deleted, retry=false 2024-12-11T23:03:12,973 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,37563,1733957770731 expired; onlineServers=0 2024-12-11T23:03:12,974 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b762025f20c5,42043,1733957769583' ***** 2024-12-11T23:03:12,974 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T23:03:12,974 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@283658c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@283658c4 2024-12-11T23:03:12,974 INFO [M:0;b762025f20c5:42043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T23:03:12,974 INFO [M:0;b762025f20c5:42043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T23:03:12,974 DEBUG [M:0;b762025f20c5:42043 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T23:03:12,975 DEBUG [M:0;b762025f20c5:42043 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T23:03:12,975 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-11T23:03:12,975 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733957773248 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733957773248,5,FailOnTimeoutGroup] 2024-12-11T23:03:12,975 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733957773242 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733957773242,5,FailOnTimeoutGroup] 2024-12-11T23:03:12,975 INFO [M:0;b762025f20c5:42043 {}] hbase.ChoreService(370): Chore service for: master/b762025f20c5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T23:03:12,975 INFO [M:0;b762025f20c5:42043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T23:03:12,975 DEBUG [M:0;b762025f20c5:42043 {}] master.HMaster(1795): Stopping service threads 2024-12-11T23:03:12,975 INFO [M:0;b762025f20c5:42043 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T23:03:12,975 INFO [M:0;b762025f20c5:42043 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T23:03:12,976 INFO [M:0;b762025f20c5:42043 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T23:03:12,976 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T23:03:12,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T23:03:12,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T23:03:12,982 DEBUG [M:0;b762025f20c5:42043 {}] zookeeper.ZKUtil(347): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T23:03:12,982 WARN [M:0;b762025f20c5:42043 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T23:03:12,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:12,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41635-0x100cb8db5a40003, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:12,982 INFO [RS:2;b762025f20c5:41635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:03:12,982 INFO [RS:2;b762025f20c5:41635 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,41635,1733957771023; zookeeper connection closed. 
2024-12-11T23:03:12,983 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7877b994 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7877b994 2024-12-11T23:03:12,983 INFO [M:0;b762025f20c5:42043 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/.lastflushedseqids 2024-12-11T23:03:12,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41607 is added to blk_1073742478_1654 (size=316) 2024-12-11T23:03:12,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073742478_1654 (size=316) 2024-12-11T23:03:12,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36751 is added to blk_1073742478_1654 (size=316) 2024-12-11T23:03:12,998 INFO [M:0;b762025f20c5:42043 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T23:03:12,998 INFO [M:0;b762025f20c5:42043 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T23:03:12,998 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T23:03:13,011 INFO [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:03:13,011 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:03:13,011 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T23:03:13,011 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T23:03:13,012 INFO [M:0;b762025f20c5:42043 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=998.07 KB heapSize=1.17 MB 2024-12-11T23:03:13,012 ERROR [AsyncFSWAL-0-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:03:13,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:13,066 INFO [RS:0;b762025f20c5:37563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:03:13,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37563-0x100cb8db5a40001, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:03:13,066 INFO [RS:0;b762025f20c5:37563 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,37563,1733957770731; zookeeper connection closed. 2024-12-11T23:03:13,066 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6544a858 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6544a858 2024-12-11T23:03:13,067 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T23:03:17,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741834_1010 (size=12451) 2024-12-11T23:03:17,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741830_1006 (size=1172726) 2024-12-11T23:03:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741833_1009 (size=15195) 2024-12-11T23:03:17,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39843 is added to blk_1073741835_1011 (size=16624) 2024-12-11T23:03:18,166 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T23:03:20,119 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T23:03:20,119 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T23:03:25,623 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:03:38,687 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:04:08,687 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043 239 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 36 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@5a2cbbe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40c3578a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4938 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.CountDownLatch$Sync@30cdd66f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12489 Waited count: 13311 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@50ff684d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@ada0adf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 980 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41-acceptor-0@6a03c20c-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d069844 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45807): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 47935 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e2eaeaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45807): State: TIMED_WAITING Blocked count: 111 Waited count: 2736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45807): State: TIMED_WAITING Blocked count: 113 Waited count: 2760 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45807): State: TIMED_WAITING Blocked count: 133 Waited count: 2739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45807): State: TIMED_WAITING Blocked count: 120 Waited count: 2730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45807): State: TIMED_WAITING Blocked count: 99 Waited count: 2719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(113298728)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp301817503-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87-acceptor-0@1a524a51-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 973 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37769): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f300348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1499 Waited count: 1659 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@2988138-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:45807 from jenkins): State: TIMED_WAITING Blocked count: 1542 Waited count: 1541 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 0 Waited count: 2306 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 973 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35637): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 308 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c1e1b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1504 Waited count: 1633 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 489 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp626054808-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp626054808-154-acceptor-0@61b11724-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp626054808-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 972 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 368 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7722b445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1470 Waited count: 1653 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@11dc98f8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 
(java.util.concurrent.ThreadPoolExecutor$Worker@3ec3fd05[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@65495865[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:50092): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 15 Waited count: 386 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@471dfb5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:50092):): State: WAITING Blocked count: 2 Waited count: 494 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36a5d28a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 526 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@425ee37a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:45807): State: TIMED_WAITING Blocked count: 15 Waited count: 505 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@465b5432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 369 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:50092)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b029276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21119b61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 96 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d77b1b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 150 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b3dc8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 96 Waited count: 495 Waiting on java.util.concurrent.Semaphore$NonfairSync@b5d714f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043): State: WAITING Blocked count: 68 Waited count: 10283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26ba39d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4cfd294c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c4afe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b490643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7bdc1e21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eceef5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 13 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;b762025f20c5:42043): State: TIMED_WAITING Blocked count: 12 Waited count: 4226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1091/0x00007fc1fcf97018.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2342e959): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4805 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 44 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 158 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 47945 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@547f8f7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@532f12e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 463 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10118351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 465 
(regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@80ffce0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 489 (LeaseRenewer:jenkins.hfs.2@localhost:45807): State: TIMED_WAITING Blocked count: 13 Waited count: 501 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 492 (LeaseRenewer:jenkins.hfs.0@localhost:45807): State: TIMED_WAITING Blocked count: 13 Waited count: 502 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (region-location-0): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 47719 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 293 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 982 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1070 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 118 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@603827c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1228 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1425 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1652 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@5c50b9e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2008 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2935 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 882 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6719 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6720 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6721 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9548 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11488 (AsyncFSWAL-1-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56eada3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11491 (java.util.concurrent.ThreadPoolExecutor$Worker@595c07aa[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11492 (java.util.concurrent.ThreadPoolExecutor$Worker@7dbc7720[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11495 (java.util.concurrent.ThreadPoolExecutor$Worker@60729bfd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11496 (java.util.concurrent.ThreadPoolExecutor$Worker@49ebf445[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11500 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-11T23:04:38,687 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:05:08,688 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
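The two DEBUG lines just above come from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer: per the message and HBASE-27595, it reaches into a Hadoop-internal object via reflection, and on newer Hadoop releases the field it looks up ("threadGroup") no longer exists, so the lookup fails with NoSuchFieldException and the fixer simply logs it at DEBUG and carries on. The following is a minimal sketch of that reflective field-probe pattern, not the HBase code itself; java.lang.Thread is used only as a stand-in target class (it has no field with this exact name), so the catch branch runs and mirrors the message seen in the log.

    // Sketch of the reflective probe behind the DEBUG lines above (illustrative only).
    import java.lang.reflect.Field;

    public class ThreadGroupFieldProbe {
        public static void main(String[] args) {
            probe(Thread.class, "threadGroup");   // stand-in target; field is absent
        }

        static void probe(Class<?> target, String fieldName) {
            try {
                Field f = target.getDeclaredField(fieldName);
                System.out.println("found " + f + "; the fixer can proceed");
            } catch (NoSuchFieldException e) {
                // Newer Hadoop versions removed/renamed the field, so the fixer
                // just logs this at DEBUG and keeps running (see HBASE-27595).
                System.out.println("NoSuchFieldException: " + fieldName
                    + "; it might be because your Hadoop version > 3.2.3 or 3.3.4");
            }
        }
    }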
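The block that follows is another of the periodic dumps announced by its own header ("Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043"): the test teardown, visible further down as Thread 22 "Time-limited test" (TestExportSnapshot.tearDownAfterClass -> HBaseTestingUtil.shutdownMiniCluster -> LocalHBaseCluster.join -> Threads.threadDumpingIsAlive), is waiting for the master to exit and prints a full stack dump each interval while it waits. Below is a rough sketch of that wait-and-dump loop using only the standard java.lang.Thread API; it is not HBase's ReflectionUtils.printThreadInfo (which also reports the Blocked/Waited counts via JMX), and apart from the 60-second interval and the thread name taken from the header, all names are illustrative.

    // Illustrative wait-and-dump loop: join a thread with a timeout and dump all
    // stacks each time the timeout elapses, like the blocks seen in this log.
    import java.util.Map;

    public class JoinWithPeriodicDump {
        static void joinDumpingEvery(Thread target, long intervalMillis) throws InterruptedException {
            while (target.isAlive()) {
                target.join(intervalMillis);             // wait up to one interval
                if (target.isAlive()) {                  // still running: dump every thread
                    Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
                    System.out.println("Process Thread Dump: still waiting on " + target.getName()
                        + " (" + all.size() + " active threads)");
                    for (Map.Entry<Thread, StackTraceElement[]> e : all.entrySet()) {
                        Thread t = e.getKey();
                        System.out.println("Thread " + t.getId() + " (" + t.getName()
                            + "): State: " + t.getState());
                        for (StackTraceElement frame : e.getValue()) {
                            System.out.println("    " + frame);
                        }
                    }
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Thread master = new Thread(() -> {
                try { Thread.sleep(5_000); } catch (InterruptedException ignored) { }
            }, "M:0;example-master");
            master.start();
            joinDumpingEvery(master, 1_000);             // 60_000 ms in the log; shortened for the demo
        }
    }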
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 36 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@5a2cbbe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40c3578a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 56 Waiting on java.util.concurrent.CountDownLatch$Sync@b0852c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12489 Waited count: 13312 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@50ff684d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@ada0adf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41-acceptor-0@6a03c20c-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d069844 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45807): State: TIMED_WAITING Blocked count: 1 Waited 
count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 53872 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e2eaeaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45807): State: TIMED_WAITING Blocked count: 111 Waited count: 2797 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45807): State: TIMED_WAITING Blocked count: 113 Waited count: 2821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45807): State: TIMED_WAITING Blocked count: 133 Waited count: 2799 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45807): State: TIMED_WAITING Blocked count: 121 Waited count: 2790 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45807): State: TIMED_WAITING Blocked count: 99 Waited count: 2779 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 274 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(113298728)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp301817503-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87-acceptor-0@1a524a51-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1093 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37769): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f300348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1519 Waited count: 1699 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@2988138-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:45807 from jenkins): 
State: TIMED_WAITING Blocked count: 1601 Waited count: 1600 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 0 Waited count: 2365 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1093 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35637): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 328 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c1e1b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1525 Waited count: 1674 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp626054808-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp626054808-154-acceptor-0@61b11724-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp626054808-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1092 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 388 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7722b445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1490 Waited count: 1693 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@11dc98f8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@3ec3fd05[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@65495865[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:50092): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 274 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 15 Waited count: 391 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@471dfb5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:50092):): State: WAITING Blocked count: 2 Waited count: 499 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36a5d28a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 531 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@425ee37a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@465b5432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:50092)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b029276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21119b61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d77b1b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 150 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b3dc8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 96 Waited count: 495 Waiting on java.util.concurrent.Semaphore$NonfairSync@b5d714f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043): State: WAITING Blocked count: 68 Waited count: 10283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26ba39d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4cfd294c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c4afe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b490643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7bdc1e21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eceef5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 13 
Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;b762025f20c5:42043): State: TIMED_WAITING Blocked count: 12 Waited count: 4226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1091/0x00007fc1fcf97018.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2342e959): State: TIMED_WAITING Blocked count: 0 Waited count: 181 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5405 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 44 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c8e6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53947 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@547f8f7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@532f12e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 463 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10118351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 465 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@80ffce0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (region-location-0): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53721 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 293 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1070 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 118 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@603827c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1228 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1652 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@5c50b9e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2008 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2935 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 883 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6719 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6720 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6721 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9548 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11488 (AsyncFSWAL-1-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56eada3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11500 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11501 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-11T23:05:38,688 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T23:06:08,688 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T23:06:11,216 DEBUG [master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=29, reuseRatio=74.36%
2024-12-11T23:06:11,224 DEBUG [master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043
231 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 36 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@5a2cbbe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40c3578a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 62 Waiting on java.util.concurrent.CountDownLatch$Sync@1a382ce6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12489 Waited count: 13313 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@50ff684d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@ada0adf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1220 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 123 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41-acceptor-0@6a03c20c-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d069844 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45807): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 204 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 204 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 59810 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e2eaeaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45807): State: TIMED_WAITING Blocked count: 111 Waited count: 2857 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45807): State: TIMED_WAITING Blocked count: 113 Waited count: 2881 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45807): State: TIMED_WAITING Blocked count: 133 Waited count: 2859 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45807): State: TIMED_WAITING Blocked count: 121 Waited count: 2850 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45807): State: TIMED_WAITING Blocked count: 99 Waited count: 2839 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 304 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(113298728)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp301817503-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87-acceptor-0@1a524a51-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1213 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37769): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f300348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1539 Waited count: 1739 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@2988138-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:45807 from jenkins): State: TIMED_WAITING Blocked count: 1661 Waited count: 1660 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 0 Waited count: 2425 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1213 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35637): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 348 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c1e1b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1545 Waited count: 1714 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp626054808-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp626054808-154-acceptor-0@61b11724-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp626054808-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1212 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 408 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7722b445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1510 Waited count: 1733 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a312ad3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18f4a0be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@11dc98f8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@3ec3fd05[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 
Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@acfd81c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@65495865[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 
(NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:50092): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 304 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 15 Waited count: 395 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@471dfb5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:50092):): State: WAITING Blocked count: 2 Waited count: 503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36a5d28a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 535 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@425ee37a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@465b5432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:50092)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b029276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21119b61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d77b1b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 150 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b3dc8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 96 Waited count: 495 Waiting on java.util.concurrent.Semaphore$NonfairSync@b5d714f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043): State: WAITING 
Blocked count: 68 Waited count: 10283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26ba39d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4cfd294c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c4afe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b490643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7bdc1e21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eceef5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 13 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;b762025f20c5:42043): State: TIMED_WAITING Blocked count: 12 Waited count: 4226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1091/0x00007fc1fcf97018.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2342e959): State: TIMED_WAITING Blocked count: 0 Waited count: 201 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6004 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 44 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 
(Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c8e6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 59949 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@547f8f7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@532f12e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 463 
(regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10118351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 465 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@80ffce0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (region-location-0): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 59723 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 293 Waiting on java.util.concurrent.ForkJoinPool@bb0300d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 994 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1070 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 118 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@603827c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1228 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1652 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@5c50b9e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2008 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6719 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6720 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6721 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9548 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11488 (AsyncFSWAL-1-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56eada3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11500 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11501 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:06:20,297 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T23:06:38,689 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:07:08,689 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 36 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@5a2cbbe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40c3578a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.CountDownLatch$Sync@46ce59d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12489 Waited count: 13314 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@50ff684d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@ada0adf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1340 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 135 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41-acceptor-0@6a03c20c-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d069844 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45807): State: TIMED_WAITING Blocked count: 1 Waited 
count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 224 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 224 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 65757 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e2eaeaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45807): State: TIMED_WAITING Blocked count: 111 Waited count: 2918 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45807): State: TIMED_WAITING Blocked count: 113 Waited count: 2942 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45807): State: TIMED_WAITING Blocked count: 133 Waited count: 2921 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45807): State: TIMED_WAITING Blocked count: 121 Waited count: 2912 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45807): State: TIMED_WAITING Blocked count: 99 Waited count: 2901 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(113298728)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp301817503-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87-acceptor-0@1a524a51-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37769): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 379 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f300348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1559 Waited count: 1779 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@2988138-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:45807 from jenkins): 
State: TIMED_WAITING Blocked count: 1721 Waited count: 1720 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 0 Waited count: 2485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35637): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 368 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c1e1b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1565 Waited count: 1754 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 669 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp626054808-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp626054808-154-acceptor-0@61b11724-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp626054808-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1332 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 428 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7722b445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1530 Waited count: 1773 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a312ad3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18f4a0be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@11dc98f8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@3ec3fd05[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@acfd81c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@65495865[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:50092): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 15 Waited count: 399 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@471dfb5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:50092):): State: WAITING Blocked count: 2 Waited count: 507 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36a5d28a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 539 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@425ee37a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@465b5432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:50092)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b029276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21119b61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d77b1b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 150 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b3dc8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 96 Waited count: 495 Waiting on java.util.concurrent.Semaphore$NonfairSync@b5d714f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043): State: WAITING Blocked count: 68 Waited count: 10283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26ba39d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4cfd294c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c4afe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42043): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b490643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7bdc1e21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eceef5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 13 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;b762025f20c5:42043): State: TIMED_WAITING Blocked count: 12 Waited count: 4226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1091/0x00007fc1fcf97018.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2342e959): State: TIMED_WAITING Blocked count: 0 Waited count: 221 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6604 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 44 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c8e6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65950 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@547f8f7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@532f12e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 463 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10118351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 465 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@80ffce0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (region-location-0): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65724 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 294 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1000 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1070 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 118 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@603827c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1228 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 1230 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1652 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@5c50b9e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2008 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6719 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6720 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6721 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11488 (AsyncFSWAL-1-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56eada3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11506 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11507 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:07:38,689 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:08:08,690 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T23:08:13,014 DEBUG [M:0;b762025f20c5:42043 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733958192998Disabling compacts and flushes for region at 1733958192998Disabling writes for close at 1733958193011 (+13 ms)Obtaining lock to block concurrent updates at 1733958193012 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733958193012Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1022022, getHeapSize=1224600, getOffHeapSize=0, getCellsCount=2672 at 1733958193012Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733958493013 (+300001 ms) 2024-12-11T23:08:13,014 WARN [M:0;b762025f20c5:42043 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4593, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4593, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-12-11T23:08:13,017 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T23:08:13,020 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-11T23:08:13,021 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-11T23:08:13,021 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 2024-12-11T23:08:13,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T23:08:13,026 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
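Editor's note: the traces above show HBase's RecoverLeaseFSUtils trying to reclaim the WAL file's HDFS lease (here it fails because the DFSClient is already closed, and the retry loop is then cancelled). A minimal sketch of the underlying retry pattern, assuming a hypothetical namenode URI and WAL path; it calls DistributedFileSystem.recoverLease directly rather than via reflection through LeaseRecoverable, and omits the timeouts and cancellation check that the real helper adds:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        // Placeholder values standing in for the namenode and WAL file in the log above.
        URI namenode = URI.create("hdfs://localhost:8020");
        Path walFile = new Path("/example/MasterData/WALs/example-wal");
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(namenode, conf)) {
          if (!(fs instanceof DistributedFileSystem)) {
            return; // lease recovery only applies to HDFS
          }
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Retry until the namenode reports the lease recovered (recoverLease returns true),
          // sleeping between attempts; this is the basic loop the "attempt=0 on file=..." log
          // lines correspond to.
          boolean recovered = false;
          for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
            recovered = dfs.recoverLease(walFile);
            if (!recovered) {
              Thread.sleep(4000L);
            }
          }
        }
      }
    }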
2024-12-11T23:08:13,026 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950
2024-12-11T23:08:13,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
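Editor's note: the "Failed to get sync result after 300000 ms" TimeoutIOException above matches the usual five-minute WAL sync timeout, which is configurable. A minimal sketch of how a mini-cluster test like this one could lower it so a stuck WAL fails fast, assuming the hbase.regionserver.wal.sync.timeout key (milliseconds) and the HBaseTestingUtil lifecycle calls already visible in these traces:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class WalSyncTimeoutSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Assumed key; lowering it means a wedged WAL surfaces as a WALSyncTimeoutIOException
        // after one minute instead of holding the region close for the full 300000 ms default.
        conf.setLong("hbase.regionserver.wal.sync.timeout", 60_000L);
        util.startMiniCluster();
        try {
          // ... run the test workload against the mini cluster here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }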
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b762025f20c5:42043 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 36 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@5a2cbbe1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 36 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40c3578a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 74 Waiting on java.util.concurrent.CountDownLatch$Sync@32962fff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12489 Waited count: 13315 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@50ff684d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@ada0adf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1460 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41-acceptor-0@6a03c20c-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:44931}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 33 Waited count: 3502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d069844 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45807): State: TIMED_WAITING Blocked count: 1 Waited 
count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 71692 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e2eaeaf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45807): State: TIMED_WAITING Blocked count: 111 Waited count: 2979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45807): State: TIMED_WAITING Blocked count: 113 Waited count: 3002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45807): State: TIMED_WAITING Blocked count: 133 Waited count: 2981 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45807): State: TIMED_WAITING Blocked count: 121 Waited count: 2972 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45807): State: TIMED_WAITING Blocked count: 99 Waited count: 2961 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 364 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(113298728)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp301817503-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87-acceptor-0@1a524a51-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:33665}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 37769): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 399 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f300348 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1579 Waited count: 1819 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 37769): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@2988138-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:44567}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:45807 from jenkins): 
State: TIMED_WAITING Blocked count: 1781 Waited count: 1780 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 0 Waited count: 2545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35637): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 388 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c1e1b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1585 Waited count: 1794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35637): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp626054808-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc1fc42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp626054808-154-acceptor-0@61b11724-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:41761}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp626054808-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1452 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 1 Waited count: 448 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7722b445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807): State: TIMED_WAITING Blocked count: 1550 Waited count: 1813 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 753 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 731 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 735 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1)): State: TIMED_WAITING Blocked count: 23 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2)): State: TIMED_WAITING Blocked count: 12 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a312ad3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18f4a0be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@11dc98f8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@3ec3fd05[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@acfd81c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@65495865[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:50092): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 364 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 15 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@471dfb5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:50092):): State: WAITING Blocked count: 2 Waited count: 512 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36a5d28a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 544 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@425ee37a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@465b5432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 484 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:50092)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 19 Waited count: 59 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b029276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21119b61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6eb48daa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d77b1b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 150 Waited count: 542 Waiting on java.util.concurrent.Semaphore$NonfairSync@74b3dc8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42043): State: WAITING Blocked count: 96 Waited count: 495 Waiting on java.util.concurrent.Semaphore$NonfairSync@b5d714f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42043): State: WAITING Blocked count: 68 Waited count: 10283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26ba39d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42043): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26c0cead Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4cfd294c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2c4afe95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42043): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b490643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42043): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7bdc1e21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5eceef5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 13 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;b762025f20c5:42043): State: TIMED_WAITING Blocked count: 12 Waited count: 4227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1416/0x00007fc1fd22d9d8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) 
app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/b762025f20c5:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2342e959): State: TIMED_WAITING Blocked count: 0 Waited count: 241 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7203 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 44 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c8e6b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 71952 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 421 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 24 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 443 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@547f8f7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@532f12e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 463 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10118351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 465 (regionserver/b762025f20c5:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@80ffce0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (region-location-0): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 71726 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (region-location-1): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1070 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 118 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@603827c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1228 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1652 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@5c50b9e1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2008 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2009 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20238ee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6719 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6720 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6721 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11488 (AsyncFSWAL-1-hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData-prefix:b762025f20c5,42043,1733957769583): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56eada3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11506 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11510 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11511 
(Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1402/0x00007fc1fd226688.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-11T23:08:17,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
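The WARN from RecoverLeaseFSUtils above is its retry loop around DistributedFileSystem.recoverLease (invoked via reflection, per the stack trace) failing with "Filesystem closed", apparently because the test's FileSystem instance had already been shut down during teardown. A minimal standalone sketch of that underlying call, assuming a caller-supplied hdfs:// path; the class name and argument handling here are illustrative only, not part of the test code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative: pass an hdfs:// WAL path such as the one named in the WARN above.
    Path walFile = new Path(args[0]);
    FileSystem fs = walFile.getFileSystem(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      // recoverLease returns true once the NameNode considers the file closed;
      // RecoverLeaseFSUtils polls recoverLease/isFileClosed until that happens or it gives up.
      boolean closed = ((DistributedFileSystem) fs).recoverLease(walFile);
      System.out.println("file closed after lease recovery: " + closed);
    }
  }
}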
2024-12-11T23:08:18,018 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-11T23:08:18,018 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T23:08:18,018 INFO [M:0;b762025f20c5:42043 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-11T23:08:18,019 INFO [M:0;b762025f20c5:42043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42043 2024-12-11T23:08:18,020 INFO [M:0;b762025f20c5:42043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T23:08:18,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45807/user/jenkins/test-data/2538e13d-2284-cb79-c522-98e373b0438f/MasterData/WALs/b762025f20c5,42043,1733957769583/b762025f20c5%2C42043%2C1733957769583.1733957771950 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-12-11T23:08:18,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:08:18,176 INFO [M:0;b762025f20c5:42043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T23:08:18,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42043-0x100cb8db5a40000, quorum=127.0.0.1:50092, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T23:08:18,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68703158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:08:18,185 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:08:18,185 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:08:18,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bc523e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:08:18,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@693ee7fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:08:18,187 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
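The wal.AbstractFSWAL ERROR above reports waiting 5 seconds for the async writer to close and points at the key "hbase.wal.async.wait.on.shutdown.seconds". A hedged sketch of raising that key on an HBase Configuration before the cluster (or test utility) starts; the value 30 is purely illustrative, not a recommended setting:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key name taken verbatim from the ERROR message above; 30 seconds is an example value only.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    System.out.println(conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", -1));
  }
}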
2024-12-11T23:08:18,187 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:08:18,187 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:08:18,188 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399083668-172.17.0.2-1733957762347 (Datanode Uuid 3bb5c23c-c8a0-4c1d-bbd0-7ffb529ed3d8) service to localhost/127.0.0.1:45807 2024-12-11T23:08:18,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data5/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,190 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data6/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,190 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:08:18,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@213fabce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:08:18,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:08:18,193 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:08:18,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca589bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:08:18,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f30d4b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:08:18,194 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:08:18,194 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:08:18,194 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399083668-172.17.0.2-1733957762347 (Datanode Uuid d14f1d73-c28d-4c4a-b4f5-a79345f45ddf) service to localhost/127.0.0.1:45807 2024-12-11T23:08:18,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:08:18,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data3/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data4/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,195 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:08:18,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b3e82e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T23:08:18,197 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:08:18,197 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:08:18,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a35a11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:08:18,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@394d89b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:08:18,198 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T23:08:18,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T23:08:18,198 WARN [BP-1399083668-172.17.0.2-1733957762347 heartbeating to localhost/127.0.0.1:45807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1399083668-172.17.0.2-1733957762347 (Datanode Uuid 5de375d8-4cc1-4ec3-8d8a-5a2117eca9f7) service to localhost/127.0.0.1:45807 2024-12-11T23:08:18,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T23:08:18,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data1/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/cluster_924e55eb-f2a9-a45b-a849-c0658b027380/data/data2/current/BP-1399083668-172.17.0.2-1733957762347 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T23:08:18,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T23:08:18,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44c957a2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T23:08:18,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T23:08:18,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T23:08:18,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20e934eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T23:08:18,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41878587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/f05bc6b8-499a-98b0-04c1-b4270f77c08b/hadoop.log.dir/,STOPPED} 2024-12-11T23:08:18,226 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T23:08:18,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
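The final records above come from tearing down the mini cluster (MiniZooKeeperCluster shutdown, then HBaseTestingUtil logging "Minicluster is down"). A sketch of the lifecycle that produces them, assuming the HBaseTestingUtil helper named in the log keeps the startMiniCluster/shutdownMiniCluster pair of its HBaseTestingUtility predecessor:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts MiniZK, MiniDFS and an HBase master plus region server, as seen in this run.
    util.startMiniCluster();
    try {
      // ... test body would go here ...
    } finally {
      // Emits the "Shutdown MiniZK cluster" and "Minicluster is down" lines seen above.
      util.shutdownMiniCluster();
    }
  }
}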