2024-11-13 20:02:32,822 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-13 20:02:32,849 main DEBUG Took 0.022701 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 20:02:32,850 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 20:02:32,850 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 20:02:32,852 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 20:02:32,854 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,870 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 20:02:32,910 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,912 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,913 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,914 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,915 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,915 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,916 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,917 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,917 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,918 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,919 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,919 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,930 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 20:02:32,932 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,933 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,934 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,935 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,936 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,937 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,939 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 20:02:32,940 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,940 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 20:02:32,957 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 20:02:32,959 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 20:02:32,962 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 20:02:32,962 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
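For orientation, the entries above show Log4j's PropertiesConfiguration assembling per-package logger levels (root resolving to INFO on the Console appender, org.apache.hadoop to WARN, org.apache.hadoop.hbase to DEBUG, org.apache.zookeeper to ERROR, and so on). The following is a minimal, hypothetical sketch — not part of the captured run — that prints those effective levels through the Log4j Core API; the class name and the particular loggers inspected are illustrative assumptions.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;

// Hypothetical helper (not in the test source): dumps the effective logger levels
// that the PropertiesConfiguration above is reported to build.
public class DumpEffectiveLevels {
  public static void main(String[] args) {
    // false = use the LoggerContext associated with the caller's class loader
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    Configuration config = ctx.getConfiguration();
    String[] names = {
        "",                        // root -> INFO,Console in the log above
        "org.apache.hadoop",       // WARN
        "org.apache.hadoop.hbase", // DEBUG
        "org.apache.zookeeper"     // ERROR
    };
    for (String name : names) {
      Level level = config.getLoggerConfig(name).getLevel();
      System.out.println((name.isEmpty() ? "root" : name) + " -> " + level);
    }
  }
}
```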
2024-11-13 20:02:32,964 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 20:02:32,964 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 20:02:32,977 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 20:02:32,981 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 20:02:32,983 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 20:02:32,983 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 20:02:32,984 main DEBUG createAppenders(={Console}) 2024-11-13 20:02:32,985 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-11-13 20:02:32,985 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-13 20:02:32,985 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-11-13 20:02:32,986 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 20:02:32,987 main DEBUG OutputStream closed 2024-11-13 20:02:32,989 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 20:02:32,990 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 20:02:32,990 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-11-13 20:02:33,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 20:02:33,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 20:02:33,122 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 20:02:33,124 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 20:02:33,125 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 20:02:33,126 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 20:02:33,126 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 20:02:33,126 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 20:02:33,127 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 20:02:33,127 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 20:02:33,128 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 20:02:33,128 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 20:02:33,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 20:02:33,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 20:02:33,130 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 20:02:33,130 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 20:02:33,131 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 20:02:33,132 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 20:02:33,134 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 20:02:33,134 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-11-13 20:02:33,135 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 20:02:33,136 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-11-13T20:02:33,169 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-13 20:02:33,174 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 20:02:33,174 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13T20:02:33,729 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584 2024-11-13T20:02:33,730 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-11-13T20:02:33,732 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-11-13T20:02:33,834 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-13T20:02:34,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T20:02:34,198 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c, deleteOnExit=true 2024-11-13T20:02:34,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T20:02:34,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/test.cache.data in system properties and HBase conf 2024-11-13T20:02:34,203 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T20:02:34,203 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir in system properties and HBase conf 2024-11-13T20:02:34,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T20:02:34,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T20:02:34,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T20:02:34,361 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T20:02:34,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T20:02:34,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T20:02:34,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T20:02:34,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T20:02:34,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T20:02:34,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T20:02:34,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T20:02:34,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T20:02:34,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T20:02:34,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/nfs.dump.dir in system properties and HBase conf 2024-11-13T20:02:34,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir in system properties and HBase conf 2024-11-13T20:02:34,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T20:02:34,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T20:02:34,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T20:02:35,537 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T20:02:35,677 INFO [Time-limited test {}] log.Log(170): Logging initialized @4175ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T20:02:35,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:35,915 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:35,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:35,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:35,971 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T20:02:35,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:35,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5140b357{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:35,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cebb95a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:36,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d3f6b4f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-37069-hadoop-hdfs-3_4_1-tests_jar-_-any-4484640705373083198/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T20:02:36,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069} 2024-11-13T20:02:36,340 INFO [Time-limited test {}] server.Server(415): Started @4839ms 2024-11-13T20:02:36,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:36,948 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:36,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:36,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:36,967 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T20:02:36,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69e7add{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:36,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@148bf833{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:37,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3de744c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-39707-hadoop-hdfs-3_4_1-tests_jar-_-any-1833354582248707186/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:02:37,158 INFO [Time-limited test 
{}] server.AbstractConnector(333): Started ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707} 2024-11-13T20:02:37,158 INFO [Time-limited test {}] server.Server(415): Started @5658ms 2024-11-13T20:02:37,261 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T20:02:37,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:37,499 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:37,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:37,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:37,509 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T20:02:37,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c01182{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:37,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22fc93bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:37,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b7103b4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-40503-hadoop-hdfs-3_4_1-tests_jar-_-any-4561949443318286359/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:02:37,710 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503} 2024-11-13T20:02:37,710 INFO [Time-limited test {}] server.Server(415): Started @6209ms 2024-11-13T20:02:37,715 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T20:02:37,911 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:37,923 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:37,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:37,954 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:37,954 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T20:02:37,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76212b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:37,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e4553a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:38,027 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,027 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,028 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,027 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de6c5d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-39501-hadoop-hdfs-3_4_1-tests_jar-_-any-13401365108906760256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:02:38,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f3eae39{HTTP/1.1, 
(http/1.1)}{localhost:39501} 2024-11-13T20:02:38,174 INFO [Time-limited test {}] server.Server(415): Started @6673ms 2024-11-13T20:02:38,184 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T20:02:38,215 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T20:02:38,216 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T20:02:38,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5899281d06386895 with lease ID 0x9351d3c366e69ff2: Processing first storage report for DS-46f3123e-655f-491b-8abd-3b031664dcf7 from datanode DatanodeRegistration(127.0.0.1:42549, datanodeUuid=11dcf2d9-05d6-42c6-8e49-507f8540cb52, infoPort=46171, infoSecurePort=0, ipcPort=41717, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,327 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5899281d06386895 with lease ID 0x9351d3c366e69ff2: from storage DS-46f3123e-655f-491b-8abd-3b031664dcf7 node DatanodeRegistration(127.0.0.1:42549, datanodeUuid=11dcf2d9-05d6-42c6-8e49-507f8540cb52, infoPort=46171, infoSecurePort=0, ipcPort=41717, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,328 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5899281d06386895 with lease ID 0x9351d3c366e69ff2: Processing first storage report for DS-cff9375f-a221-4fff-99ad-70252783c643 from datanode DatanodeRegistration(127.0.0.1:42549, datanodeUuid=11dcf2d9-05d6-42c6-8e49-507f8540cb52, infoPort=46171, infoSecurePort=0, ipcPort=41717, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5899281d06386895 with lease ID 0x9351d3c366e69ff2: from storage DS-cff9375f-a221-4fff-99ad-70252783c643 node DatanodeRegistration(127.0.0.1:42549, datanodeUuid=11dcf2d9-05d6-42c6-8e49-507f8540cb52, infoPort=46171, infoSecurePort=0, ipcPort=41717, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: false, processing time: 3 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28670c7e52322c58 with lease ID 0x9351d3c366e69ff1: Processing first storage report for DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c from datanode DatanodeRegistration(127.0.0.1:38925, datanodeUuid=28be5666-4b1b-41de-bf86-7a826041cf21, infoPort=44715, infoSecurePort=0, ipcPort=40237, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28670c7e52322c58 with lease ID 0x9351d3c366e69ff1: from storage DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c node DatanodeRegistration(127.0.0.1:38925, datanodeUuid=28be5666-4b1b-41de-bf86-7a826041cf21, infoPort=44715, infoSecurePort=0, ipcPort=40237, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: true, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,332 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x28670c7e52322c58 with lease ID 0x9351d3c366e69ff1: Processing first storage report for DS-f5ff18ef-253f-45f4-806b-cd31c84ea4e5 from datanode DatanodeRegistration(127.0.0.1:38925, datanodeUuid=28be5666-4b1b-41de-bf86-7a826041cf21, infoPort=44715, infoSecurePort=0, ipcPort=40237, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,332 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x28670c7e52322c58 with lease ID 0x9351d3c366e69ff1: from storage DS-f5ff18ef-253f-45f4-806b-cd31c84ea4e5 node DatanodeRegistration(127.0.0.1:38925, datanodeUuid=28be5666-4b1b-41de-bf86-7a826041cf21, infoPort=44715, infoSecurePort=0, ipcPort=40237, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,488 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,492 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198/current, will proceed with Du for space computation calculation, 2024-11-13T20:02:38,634 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T20:02:38,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23f0fbcc396e4553 with lease ID 0x9351d3c366e69ff3: Processing first storage report for DS-a13b1b87-3a82-400c-a778-df66e98479c9 from datanode DatanodeRegistration(127.0.0.1:33293, datanodeUuid=78bac615-6573-4f37-853d-8a4bd794b839, infoPort=42369, infoSecurePort=0, ipcPort=35411, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,652 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23f0fbcc396e4553 with lease ID 0x9351d3c366e69ff3: from storage DS-a13b1b87-3a82-400c-a778-df66e98479c9 node DatanodeRegistration(127.0.0.1:33293, datanodeUuid=78bac615-6573-4f37-853d-8a4bd794b839, infoPort=42369, infoSecurePort=0, ipcPort=35411, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23f0fbcc396e4553 with lease ID 0x9351d3c366e69ff3: Processing first storage report for DS-bcd7189d-2416-45f2-aa9f-f832dc59fae4 from datanode DatanodeRegistration(127.0.0.1:33293, datanodeUuid=78bac615-6573-4f37-853d-8a4bd794b839, infoPort=42369, infoSecurePort=0, ipcPort=35411, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198) 2024-11-13T20:02:38,654 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23f0fbcc396e4553 with lease ID 0x9351d3c366e69ff3: from storage DS-bcd7189d-2416-45f2-aa9f-f832dc59fae4 node DatanodeRegistration(127.0.0.1:33293, datanodeUuid=78bac615-6573-4f37-853d-8a4bd794b839, infoPort=42369, infoSecurePort=0, ipcPort=35411, storageInfo=lv=-57;cid=testClusterID;nsid=702932660;c=1731528155198), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T20:02:38,853 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584 2024-11-13T20:02:38,986 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/zookeeper_0, clientPort=62658, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T20:02:39,028 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62658 2024-11-13T20:02:39,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:39,079 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:39,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741825_1001 (size=7) 2024-11-13T20:02:39,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741825_1001 (size=7) 2024-11-13T20:02:39,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741825_1001 (size=7) 2024-11-13T20:02:39,898 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 with version=8 2024-11-13T20:02:39,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/hbase-staging 2024-11-13T20:02:40,021 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T20:02:40,335 INFO [Time-limited test {}] client.ConnectionUtils(128): master/54d0784d1943:0 server-side Connection retries=45 2024-11-13T20:02:40,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:40,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:40,353 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T20:02:40,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:40,354 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T20:02:40,527 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T20:02:40,611 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T20:02:40,625 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T20:02:40,630 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T20:02:40,669 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 21093 (auto-detected) 2024-11-13T20:02:40,671 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-13T20:02:40,700 INFO [Time-limited test {}] ipc.NettyRpcServer(191): 
Bind to /172.17.0.2:34357 2024-11-13T20:02:40,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34357 connecting to ZooKeeper ensemble=127.0.0.1:62658 2024-11-13T20:02:40,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343570x0, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T20:02:40,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34357-0x100edb1e38f0000 connected 2024-11-13T20:02:40,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:40,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:40,843 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:02:40,847 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248, hbase.cluster.distributed=false 2024-11-13T20:02:40,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T20:02:40,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34357 2024-11-13T20:02:40,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34357 2024-11-13T20:02:40,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34357 2024-11-13T20:02:40,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34357 2024-11-13T20:02:40,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34357 2024-11-13T20:02:41,044 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/54d0784d1943:0 server-side Connection retries=45 2024-11-13T20:02:41,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,048 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T20:02:41,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T20:02:41,057 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T20:02:41,060 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T20:02:41,061 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36439 2024-11-13T20:02:41,064 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36439 connecting to ZooKeeper ensemble=127.0.0.1:62658 2024-11-13T20:02:41,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364390x0, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T20:02:41,082 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364390x0, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:02:41,087 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T20:02:41,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36439-0x100edb1e38f0001 connected 2024-11-13T20:02:41,107 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T20:02:41,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T20:02:41,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T20:02:41,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36439 2024-11-13T20:02:41,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36439 2024-11-13T20:02:41,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36439 2024-11-13T20:02:41,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36439 2024-11-13T20:02:41,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36439 2024-11-13T20:02:41,198 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/54d0784d1943:0 server-side Connection retries=45 2024-11-13T20:02:41,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,199 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T20:02:41,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,199 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T20:02:41,200 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T20:02:41,200 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T20:02:41,201 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33333 2024-11-13T20:02:41,203 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33333 connecting to ZooKeeper ensemble=127.0.0.1:62658 2024-11-13T20:02:41,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,207 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333330x0, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T20:02:41,221 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33333-0x100edb1e38f0002 connected 2024-11-13T20:02:41,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:02:41,223 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T20:02:41,227 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T20:02:41,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T20:02:41,231 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T20:02:41,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33333 2024-11-13T20:02:41,237 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33333 2024-11-13T20:02:41,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33333 2024-11-13T20:02:41,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33333 2024-11-13T20:02:41,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33333 2024-11-13T20:02:41,273 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/54d0784d1943:0 server-side Connection retries=45 2024-11-13T20:02:41,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,274 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T20:02:41,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T20:02:41,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T20:02:41,275 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T20:02:41,275 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T20:02:41,278 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39533 2024-11-13T20:02:41,280 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39533 connecting to ZooKeeper ensemble=127.0.0.1:62658 2024-11-13T20:02:41,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395330x0, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T20:02:41,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:395330x0, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:02:41,297 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T20:02:41,301 
DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39533-0x100edb1e38f0003 connected 2024-11-13T20:02:41,301 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T20:02:41,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T20:02:41,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T20:02:41,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-13T20:02:41,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39533 2024-11-13T20:02:41,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39533 2024-11-13T20:02:41,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-13T20:02:41,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-13T20:02:41,346 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;54d0784d1943:34357 2024-11-13T20:02:41,347 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/54d0784d1943,34357,1731528160092 2024-11-13T20:02:41,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,360 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/54d0784d1943,34357,1731528160092 2024-11-13T20:02:41,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T20:02:41,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T20:02:41,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T20:02:41,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,392 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T20:02:41,393 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/54d0784d1943,34357,1731528160092 from backup master directory 2024-11-13T20:02:41,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/54d0784d1943,34357,1731528160092 2024-11-13T20:02:41,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T20:02:41,398 WARN [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T20:02:41,399 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=54d0784d1943,34357,1731528160092 2024-11-13T20:02:41,402 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T20:02:41,406 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T20:02:41,492 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/hbase.id] with ID: f805b11d-02b0-4f5c-91d0-d72e768276fd 2024-11-13T20:02:41,492 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.tmp/hbase.id 2024-11-13T20:02:41,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741826_1002 (size=42) 2024-11-13T20:02:41,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741826_1002 (size=42) 2024-11-13T20:02:41,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741826_1002 (size=42) 2024-11-13T20:02:41,537 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.tmp/hbase.id]:[hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/hbase.id] 2024-11-13T20:02:41,614 INFO [master/54d0784d1943:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T20:02:41,629 INFO [master/54d0784d1943:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T20:02:41,656 INFO [master/54d0784d1943:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 
2024-11-13T20:02:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:41,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741827_1003 (size=196) 2024-11-13T20:02:41,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741827_1003 (size=196) 2024-11-13T20:02:41,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741827_1003 (size=196) 2024-11-13T20:02:41,754 INFO [master/54d0784d1943:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:02:41,757 INFO [master/54d0784d1943:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T20:02:41,778 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T20:02:41,784 INFO [master/54d0784d1943:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T20:02:41,827 WARN [IPC Server handler 3 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:41,827 WARN [IPC Server handler 3 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:41,828 WARN [IPC Server handler 3 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:41,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741828_1004 (size=1189) 2024-11-13T20:02:41,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741828_1004 (size=1189) 2024-11-13T20:02:41,894 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/data/master/store 2024-11-13T20:02:41,912 WARN [IPC Server handler 2 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:41,912 WARN [IPC Server handler 2 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:41,913 WARN [IPC Server handler 2 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:41,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741829_1005 (size=34) 2024-11-13T20:02:41,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741829_1005 (size=34) 2024-11-13T20:02:41,927 INFO [master/54d0784d1943:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-13T20:02:41,931 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:41,933 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T20:02:41,933 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T20:02:41,933 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T20:02:41,935 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T20:02:41,935 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T20:02:41,935 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T20:02:41,937 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731528161933Disabling compacts and flushes for region at 1731528161933Disabling writes for close at 1731528161935 (+2 ms)Writing region close event to WAL at 1731528161935Closed at 1731528161935 2024-11-13T20:02:41,939 WARN [master/54d0784d1943:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/data/master/store/.initializing 2024-11-13T20:02:41,940 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092 2024-11-13T20:02:41,952 INFO [master/54d0784d1943:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T20:02:41,971 INFO [master/54d0784d1943:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=54d0784d1943%2C34357%2C1731528160092, suffix=, logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092, archiveDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/oldWALs, maxLogs=10 2024-11-13T20:02:41,999 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976, exclude list is [], retry=0 2024-11-13T20:02:42,003 WARN [IPC Server handler 4 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,003 WARN [IPC Server handler 4 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:42,004 WARN [IPC Server handler 4 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:42,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42549,DS-46f3123e-655f-491b-8abd-3b031664dcf7,DISK] 2024-11-13T20:02:42,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38925,DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c,DISK] 2024-11-13T20:02:42,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-13T20:02:42,084 INFO [master/54d0784d1943:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976 2024-11-13T20:02:42,085 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44715:44715),(127.0.0.1/127.0.0.1:46171:46171)] 2024-11-13T20:02:42,086 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T20:02:42,087 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:42,092 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,094 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T20:02:42,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:42,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:42,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T20:02:42,211 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:42,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:42,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T20:02:42,216 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:42,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:42,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T20:02:42,224 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:42,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:42,227 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,232 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,233 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,245 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,246 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,251 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-13T20:02:42,262 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T20:02:42,275 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:02:42,277 INFO [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69728814, jitterRate=0.03904029726982117}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T20:02:42,294 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731528162114Initializing all the Stores at 1731528162118 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528162118Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528162119 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528162119Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528162119Cleaning up temporary data from old regions at 1731528162246 (+127 ms)Region opened successfully at 1731528162293 (+47 ms) 2024-11-13T20:02:42,296 INFO [master/54d0784d1943:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T20:02:42,364 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb5c517, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=54d0784d1943/172.17.0.2:0 2024-11-13T20:02:42,408 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-13T20:02:42,424 INFO [master/54d0784d1943:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T20:02:42,424 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T20:02:42,428 INFO [master/54d0784d1943:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T20:02:42,430 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T20:02:42,436 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-13T20:02:42,436 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T20:02:42,491 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T20:02:42,505 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T20:02:42,508 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T20:02:42,512 INFO [master/54d0784d1943:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T20:02:42,523 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T20:02:42,526 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T20:02:42,529 INFO [master/54d0784d1943:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T20:02:42,535 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T20:02:42,537 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T20:02:42,539 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T20:02:42,541 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T20:02:42,564 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T20:02:42,566 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T20:02:42,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,575 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=54d0784d1943,34357,1731528160092, sessionid=0x100edb1e38f0000, setting cluster-up flag (Was=false) 2024-11-13T20:02:42,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-13T20:02:42,602 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T20:02:42,604 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=54d0784d1943,34357,1731528160092 2024-11-13T20:02:42,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:42,618 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T20:02:42,620 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=54d0784d1943,34357,1731528160092 2024-11-13T20:02:42,627 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T20:02:42,635 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(746): ClusterId : f805b11d-02b0-4f5c-91d0-d72e768276fd 2024-11-13T20:02:42,635 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(746): ClusterId : f805b11d-02b0-4f5c-91d0-d72e768276fd 2024-11-13T20:02:42,640 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T20:02:42,640 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T20:02:42,641 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(746): ClusterId : f805b11d-02b0-4f5c-91d0-d72e768276fd 2024-11-13T20:02:42,642 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T20:02:42,650 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T20:02:42,650 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T20:02:42,650 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T20:02:42,651 DEBUG [RS:2;54d0784d1943:39533 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T20:02:42,651 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T20:02:42,651 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T20:02:42,655 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T20:02:42,655 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T20:02:42,656 DEBUG [RS:1;54d0784d1943:33333 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4865a622, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=54d0784d1943/172.17.0.2:0 2024-11-13T20:02:42,656 DEBUG [RS:0;54d0784d1943:36439 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70cd70a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=54d0784d1943/172.17.0.2:0 2024-11-13T20:02:42,679 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;54d0784d1943:36439 2024-11-13T20:02:42,679 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;54d0784d1943:33333 2024-11-13T20:02:42,683 INFO [RS:0;54d0784d1943:36439 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T20:02:42,683 INFO [RS:0;54d0784d1943:36439 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T20:02:42,684 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-13T20:02:42,684 INFO [RS:1;54d0784d1943:33333 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T20:02:42,684 INFO [RS:1;54d0784d1943:33333 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T20:02:42,685 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T20:02:42,694 DEBUG [RS:2;54d0784d1943:39533 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2449a5d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=54d0784d1943/172.17.0.2:0 2024-11-13T20:02:42,694 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T20:02:42,695 INFO [master/54d0784d1943:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:02:42,698 INFO [RS:0;54d0784d1943:36439 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:02:42,699 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T20:02:42,699 INFO [master/54d0784d1943:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-13T20:02:42,695 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T20:02:42,702 INFO [RS:1;54d0784d1943:33333 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:02:42,703 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T20:02:42,704 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=33333, startcode=1731528161197 2024-11-13T20:02:42,706 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=36439, startcode=1731528160987 2024-11-13T20:02:42,722 DEBUG [RS:1;54d0784d1943:33333 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T20:02:42,726 DEBUG [RS:0;54d0784d1943:36439 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T20:02:42,729 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;54d0784d1943:39533 2024-11-13T20:02:42,729 INFO [RS:2;54d0784d1943:39533 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T20:02:42,729 INFO [RS:2;54d0784d1943:39533 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T20:02:42,729 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T20:02:42,730 INFO [RS:2;54d0784d1943:39533 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:02:42,730 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T20:02:42,732 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=39533, startcode=1731528161273 2024-11-13T20:02:42,732 DEBUG [RS:2;54d0784d1943:39533 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T20:02:42,796 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,796 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,796 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:42,797 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:42,797 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,797 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,797 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:42,797 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:42,797 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38883, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T20:02:42,798 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T20:02:42,798 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42185, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T20:02:42,801 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T20:02:42,805 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,812 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,813 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,816 INFO [master/54d0784d1943:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T20:02:42,826 INFO [master/54d0784d1943:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T20:02:42,833 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 54d0784d1943,34357,1731528160092 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T20:02:42,847 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,847 WARN [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T20:02:42,847 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,847 WARN [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-13T20:02:42,849 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,849 WARN [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T20:02:42,851 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/54d0784d1943:0, corePoolSize=5, maxPoolSize=5 2024-11-13T20:02:42,851 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/54d0784d1943:0, corePoolSize=5, maxPoolSize=5 2024-11-13T20:02:42,851 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/54d0784d1943:0, corePoolSize=5, maxPoolSize=5 2024-11-13T20:02:42,851 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/54d0784d1943:0, corePoolSize=5, maxPoolSize=5 2024-11-13T20:02:42,851 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/54d0784d1943:0, corePoolSize=10, maxPoolSize=10 2024-11-13T20:02:42,852 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:42,852 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/54d0784d1943:0, corePoolSize=2, maxPoolSize=2 2024-11-13T20:02:42,852 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:42,894 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T20:02:42,895 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T20:02:42,902 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731528192902 2024-11-13T20:02:42,903 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T20:02:42,904 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T20:02:42,906 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:42,906 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T20:02:42,911 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T20:02:42,912 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T20:02:42,912 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T20:02:42,912 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T20:02:42,914 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T20:02:42,923 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T20:02:42,924 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T20:02:42,925 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T20:02:42,926 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:42,926 WARN [IPC Server handler 0 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:42,926 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:42,945 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T20:02:42,946 INFO [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T20:02:42,949 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=39533, startcode=1731528161273 2024-11-13T20:02:42,949 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.large.0-1731528162947,5,FailOnTimeoutGroup] 2024-11-13T20:02:42,949 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=36439, startcode=1731528160987 2024-11-13T20:02:42,951 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,951 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=33333, startcode=1731528161197 2024-11-13T20:02:42,953 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,952 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,953 WARN [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T20:02:42,953 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.small.0-1731528162950,5,FailOnTimeoutGroup] 2024-11-13T20:02:42,954 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:42,954 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T20:02:42,954 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T20:02:42,955 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,955 WARN [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T20:02:42,955 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:42,956 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:42,956 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T20:02:42,957 WARN [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T20:02:42,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741831_1007 (size=1321) 2024-11-13T20:02:42,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741831_1007 (size=1321) 2024-11-13T20:02:42,971 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T20:02:42,972 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 
'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:43,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741832_1008 (size=32) 2024-11-13T20:02:43,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741832_1008 (size=32) 2024-11-13T20:02:43,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741832_1008 (size=32) 2024-11-13T20:02:43,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:43,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T20:02:43,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T20:02:43,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:43,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:43,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T20:02:43,088 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T20:02:43,088 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:43,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:43,090 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T20:02:43,094 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T20:02:43,094 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:43,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:43,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T20:02:43,098 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T20:02:43,098 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:43,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:43,101 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T20:02:43,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740 2024-11-13T20:02:43,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740 2024-11-13T20:02:43,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T20:02:43,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T20:02:43,113 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T20:02:43,119 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T20:02:43,128 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:02:43,130 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64187017, jitterRate=-0.04353891313076019}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T20:02:43,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731528163074Initializing all the Stores at 1731528163076 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528163076Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528163077 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528163078 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528163078Cleaning up temporary data from old regions at 1731528163111 (+33 ms)Region opened successfully at 1731528163134 (+23 ms) 2024-11-13T20:02:43,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T20:02:43,134 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T20:02:43,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T20:02:43,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T20:02:43,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T20:02:43,137 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T20:02:43,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731528163134Disabling compacts and flushes for region at 1731528163134Disabling writes for close at 1731528163135 (+1 ms)Writing region close event to WAL at 1731528163136 (+1 ms)Closed at 1731528163137 (+1 ms) 2024-11-13T20:02:43,144 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T20:02:43,144 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T20:02:43,157 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=33333, startcode=1731528161197 2024-11-13T20:02:43,157 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=39533, startcode=1731528161273 2024-11-13T20:02:43,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T20:02:43,158 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(2659): reportForDuty to master=54d0784d1943,34357,1731528160092 with port=36439, startcode=1731528160987 2024-11-13T20:02:43,163 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,167 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(517): Registering regionserver=54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,181 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(517): Registering regionserver=54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,181 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:43,181 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43181 2024-11-13T20:02:43,182 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T20:02:43,182 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T20:02:43,187 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,187 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:43,188 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43181 2024-11-13T20:02:43,188 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] master.ServerManager(517): Registering regionserver=54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,188 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T20:02:43,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T20:02:43,189 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T20:02:43,197 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:43,197 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43181 2024-11-13T20:02:43,197 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T20:02:43,198 DEBUG [RS:0;54d0784d1943:36439 {}] zookeeper.ZKUtil(111): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,198 WARN [RS:0;54d0784d1943:36439 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T20:02:43,198 INFO [RS:0;54d0784d1943:36439 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T20:02:43,199 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,199 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [54d0784d1943,36439,1731528160987] 2024-11-13T20:02:43,200 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [54d0784d1943,39533,1731528161273] 2024-11-13T20:02:43,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T20:02:43,201 DEBUG [RS:2;54d0784d1943:39533 {}] zookeeper.ZKUtil(111): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,201 WARN [RS:2;54d0784d1943:39533 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T20:02:43,201 INFO [RS:2;54d0784d1943:39533 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T20:02:43,201 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,203 DEBUG [RS:1;54d0784d1943:33333 {}] zookeeper.ZKUtil(111): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,203 WARN [RS:1;54d0784d1943:33333 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T20:02:43,203 INFO [RS:1;54d0784d1943:33333 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T20:02:43,203 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [54d0784d1943,33333,1731528161197] 2024-11-13T20:02:43,203 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,268 INFO [RS:0;54d0784d1943:36439 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T20:02:43,268 INFO [RS:1;54d0784d1943:33333 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T20:02:43,269 INFO [RS:2;54d0784d1943:39533 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T20:02:43,317 INFO [RS:1;54d0784d1943:33333 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T20:02:43,325 INFO [RS:1;54d0784d1943:33333 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T20:02:43,325 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,336 INFO [RS:0;54d0784d1943:36439 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T20:02:43,340 WARN [54d0784d1943:34357 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-13T20:02:43,341 INFO [RS:2;54d0784d1943:39533 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T20:02:43,343 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T20:02:43,346 INFO [RS:2;54d0784d1943:39533 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T20:02:43,346 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,350 INFO [RS:0;54d0784d1943:36439 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T20:02:43,350 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T20:02:43,351 INFO [RS:1;54d0784d1943:33333 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T20:02:43,351 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T20:02:43,352 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T20:02:43,353 INFO [RS:2;54d0784d1943:39533 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T20:02:43,353 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,354 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,354 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,354 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/54d0784d1943:0, corePoolSize=2, maxPoolSize=2 2024-11-13T20:02:43,355 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/54d0784d1943:0, corePoolSize=2, maxPoolSize=2 2024-11-13T20:02:43,355 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): 
Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,355 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 INFO [RS:0;54d0784d1943:36439 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T20:02:43,355 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,356 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,356 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,356 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,357 DEBUG [RS:1;54d0784d1943:33333 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,357 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,357 DEBUG [RS:2;54d0784d1943:39533 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,357 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): 
Starting executor service name=RS_OPEN_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/54d0784d1943:0, corePoolSize=2, maxPoolSize=2 2024-11-13T20:02:43,358 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/54d0784d1943:0, corePoolSize=1, maxPoolSize=1 2024-11-13T20:02:43,359 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,360 DEBUG [RS:0;54d0784d1943:36439 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/54d0784d1943:0, corePoolSize=3, maxPoolSize=3 2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-13T20:02:43,419 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,33333,1731528161197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T20:02:43,421 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,421 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,421 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,422 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,422 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,422 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,39533,1731528161273-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,424 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,36439,1731528160987-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T20:02:43,460 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T20:02:43,460 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T20:02:43,463 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,33333,1731528161197-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,463 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,39533,1731528161273-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,464 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,464 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T20:02:43,464 INFO [RS:1;54d0784d1943:33333 {}] regionserver.Replication(171): 54d0784d1943,33333,1731528161197 started 2024-11-13T20:02:43,464 INFO [RS:2;54d0784d1943:39533 {}] regionserver.Replication(171): 54d0784d1943,39533,1731528161273 started 2024-11-13T20:02:43,466 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T20:02:43,466 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,36439,1731528160987-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,466 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,468 INFO [RS:0;54d0784d1943:36439 {}] regionserver.Replication(171): 54d0784d1943,36439,1731528160987 started 2024-11-13T20:02:43,496 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1482): Serving as 54d0784d1943,33333,1731528161197, RpcServer on 54d0784d1943/172.17.0.2:33333, sessionid=0x100edb1e38f0002 2024-11-13T20:02:43,497 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T20:02:43,496 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:43,498 DEBUG [RS:1;54d0784d1943:33333 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,498 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,33333,1731528161197' 2024-11-13T20:02:43,498 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1482): Serving as 54d0784d1943,39533,1731528161273, RpcServer on 54d0784d1943/172.17.0.2:39533, sessionid=0x100edb1e38f0003 2024-11-13T20:02:43,498 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T20:02:43,498 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T20:02:43,498 DEBUG [RS:2;54d0784d1943:39533 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,498 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,39533,1731528161273' 2024-11-13T20:02:43,498 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T20:02:43,498 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
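Before flush-table-proc is reported as started, each procedure member checks the coordination znodes under /hbase (the abort marker and the acquired barrier). A standalone sketch that inspects the same znodes with the plain ZooKeeper client follows; it assumes the quorum address 127.0.0.1:62658 seen in this log and an arbitrary 30 s session timeout, and it is not the internal ZKProcedureMemberRpcs code.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkProcedureZnodes {
        public static void main(String[] args) throws Exception {
            // Quorum address taken from the log; adjust for your own cluster.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:62658", 30000, event -> { });
            // The flush-table procedure coordinates members through these znodes.
            System.out.println("abort marker: " + zk.exists("/hbase/flush-table-proc/abort", false));
            List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
            System.out.println("procedures awaiting members: " + acquired);
            zk.close();
        }
    }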
2024-11-13T20:02:43,499 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1482): Serving as 54d0784d1943,36439,1731528160987, RpcServer on 54d0784d1943/172.17.0.2:36439, sessionid=0x100edb1e38f0001 2024-11-13T20:02:43,499 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T20:02:43,499 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T20:02:43,499 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T20:02:43,500 DEBUG [RS:0;54d0784d1943:36439 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,500 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,36439,1731528160987' 2024-11-13T20:02:43,500 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T20:02:43,500 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T20:02:43,500 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T20:02:43,500 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T20:02:43,500 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T20:02:43,500 DEBUG [RS:1;54d0784d1943:33333 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,500 DEBUG [RS:2;54d0784d1943:39533 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 54d0784d1943,39533,1731528161273 2024-11-13T20:02:43,500 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,33333,1731528161197' 2024-11-13T20:02:43,500 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,39533,1731528161273' 2024-11-13T20:02:43,501 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T20:02:43,501 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T20:02:43,501 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T20:02:43,502 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T20:02:43,502 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T20:02:43,502 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T20:02:43,502 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 
2024-11-13T20:02:43,502 DEBUG [RS:0;54d0784d1943:36439 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 54d0784d1943,36439,1731528160987 2024-11-13T20:02:43,502 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '54d0784d1943,36439,1731528160987' 2024-11-13T20:02:43,502 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T20:02:43,503 DEBUG [RS:2;54d0784d1943:39533 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T20:02:43,503 INFO [RS:2;54d0784d1943:39533 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T20:02:43,503 INFO [RS:2;54d0784d1943:39533 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T20:02:43,503 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T20:02:43,504 DEBUG [RS:0;54d0784d1943:36439 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T20:02:43,504 INFO [RS:0;54d0784d1943:36439 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T20:02:43,504 INFO [RS:0;54d0784d1943:36439 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T20:02:43,509 DEBUG [RS:1;54d0784d1943:33333 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T20:02:43,509 INFO [RS:1;54d0784d1943:33333 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T20:02:43,509 INFO [RS:1;54d0784d1943:33333 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
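All three region servers report quota support disabled, so neither the RPC nor the space quota manager starts. The flag that controls this is, to the best of my knowledge, hbase.quota.enabled; a minimal configuration sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager only start when this
            // flag is true; in the run above it is left at its default (false).
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println("hbase.quota.enabled = " + conf.getBoolean("hbase.quota.enabled", false));
        }
    }

With the flag on, throttles and space limits are then administered through the Admin quota API rather than through configuration.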
2024-11-13T20:02:43,611 INFO [RS:1;54d0784d1943:33333 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T20:02:43,613 INFO [RS:2;54d0784d1943:39533 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T20:02:43,617 INFO [RS:0;54d0784d1943:36439 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T20:02:43,621 INFO [RS:0;54d0784d1943:36439 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=54d0784d1943%2C36439%2C1731528160987, suffix=, logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,36439,1731528160987, archiveDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs, maxLogs=32 2024-11-13T20:02:43,622 INFO [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=54d0784d1943%2C33333%2C1731528161197, suffix=, logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197, archiveDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs, maxLogs=32 2024-11-13T20:02:43,634 INFO [RS:2;54d0784d1943:39533 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=54d0784d1943%2C39533%2C1731528161273, suffix=, logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,39533,1731528161273, archiveDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs, maxLogs=32 2024-11-13T20:02:43,652 DEBUG [RS:1;54d0784d1943:33333 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197/54d0784d1943%2C33333%2C1731528161197.1731528163629, exclude list is [], retry=0 2024-11-13T20:02:43,656 WARN [IPC Server handler 3 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:43,656 WARN [IPC Server handler 3 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:43,656 WARN [IPC Server handler 3 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:43,658 DEBUG [RS:0;54d0784d1943:36439 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When 
create output stream for /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,36439,1731528160987/54d0784d1943%2C36439%2C1731528160987.1731528163630, exclude list is [], retry=0 2024-11-13T20:02:43,659 DEBUG [RS:2;54d0784d1943:39533 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,39533,1731528161273/54d0784d1943%2C39533%2C1731528161273.1731528163637, exclude list is [], retry=0 2024-11-13T20:02:43,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38925,DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c,DISK] 2024-11-13T20:02:43,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42549,DS-46f3123e-655f-491b-8abd-3b031664dcf7,DISK] 2024-11-13T20:02:43,664 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:43,664 WARN [IPC Server handler 0 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:43,664 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:43,664 WARN [IPC Server handler 3 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:43,664 WARN [IPC Server handler 3 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:43,664 WARN [IPC Server handler 3 on default port 
43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:43,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42549,DS-46f3123e-655f-491b-8abd-3b031664dcf7,DISK] 2024-11-13T20:02:43,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38925,DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c,DISK] 2024-11-13T20:02:43,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42549,DS-46f3123e-655f-491b-8abd-3b031664dcf7,DISK] 2024-11-13T20:02:43,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38925,DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c,DISK] 2024-11-13T20:02:43,759 INFO [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197/54d0784d1943%2C33333%2C1731528161197.1731528163629 2024-11-13T20:02:43,768 DEBUG [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44715:44715),(127.0.0.1/127.0.0.1:46171:46171)] 2024-11-13T20:02:43,770 INFO [RS:0;54d0784d1943:36439 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,36439,1731528160987/54d0784d1943%2C36439%2C1731528160987.1731528163630 2024-11-13T20:02:43,783 DEBUG [RS:0;54d0784d1943:36439 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46171:46171),(127.0.0.1/127.0.0.1:44715:44715)] 2024-11-13T20:02:43,799 INFO [RS:2;54d0784d1943:39533 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,39533,1731528161273/54d0784d1943%2C39533%2C1731528161273.1731528163637 2024-11-13T20:02:43,810 DEBUG [RS:2;54d0784d1943:39533 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46171:46171),(127.0.0.1/127.0.0.1:44715:44715)] 2024-11-13T20:02:43,845 DEBUG [54d0784d1943:34357 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-13T20:02:43,857 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:02:43,876 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:02:43,876 DEBUG [54d0784d1943:34357 {}] 
balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:02:43,877 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:02:43,877 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:02:43,877 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:02:43,877 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:02:43,877 INFO [54d0784d1943:34357 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:02:43,877 INFO [54d0784d1943:34357 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:02:43,877 INFO [54d0784d1943:34357 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:02:43,877 DEBUG [54d0784d1943:34357 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:02:43,888 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:02:43,901 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 54d0784d1943,33333,1731528161197, state=OPENING 2024-11-13T20:02:43,907 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T20:02:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:43,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:43,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:43,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:43,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:43,912 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:43,919 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T20:02:43,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:02:44,115 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:02:44,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34803, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:02:44,134 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T20:02:44,134 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T20:02:44,135 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-13T20:02:44,138 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=54d0784d1943%2C33333%2C1731528161197.meta, suffix=.meta, logDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197, archiveDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs, maxLogs=32 2024-11-13T20:02:44,159 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197/54d0784d1943%2C33333%2C1731528161197.meta.1731528164141.meta, exclude list is [], retry=0 2024-11-13T20:02:44,163 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T20:02:44,163 WARN [IPC Server handler 0 on default port 43181 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T20:02:44,163 WARN [IPC Server handler 0 on default port 43181 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T20:02:44,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in 
unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42549,DS-46f3123e-655f-491b-8abd-3b031664dcf7,DISK] 2024-11-13T20:02:44,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38925,DS-2bc2bef5-4555-4c18-b93b-8eb0d3466e5c,DISK] 2024-11-13T20:02:44,177 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/WALs/54d0784d1943,33333,1731528161197/54d0784d1943%2C33333%2C1731528161197.meta.1731528164141.meta 2024-11-13T20:02:44,184 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46171:46171),(127.0.0.1/127.0.0.1:44715:44715)] 2024-11-13T20:02:44,184 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T20:02:44,186 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-13T20:02:44,187 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:02:44,188 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T20:02:44,191 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T20:02:44,192 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
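The WAL lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, AsyncFSWALProvider, archive to oldWALs) are driven by a handful of settings, with rollsize being the block size times the roll multiplier (256 MB x 0.5 = 128 MB). A configuration sketch using what I believe are the corresponding keys; the values simply restate what the log reports.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "asyncfs" selects the AsyncFSWALProvider instantiated above.
            conf.set("hbase.wal.provider", "asyncfs");
            // rollsize (128 MB) = blocksize (256 MB) * logroll multiplier (0.5).
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // maxLogs=32 in the log; fully flushed WALs get moved to the oldWALs archive dir.
            conf.setInt("hbase.regionserver.maxlogs", 32);
        }
    }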
2024-11-13T20:02:44,203 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T20:02:44,203 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:44,204 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T20:02:44,204 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T20:02:44,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T20:02:44,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T20:02:44,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:44,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:44,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T20:02:44,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T20:02:44,212 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:44,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:44,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T20:02:44,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T20:02:44,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:44,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T20:02:44,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T20:02:44,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T20:02:44,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:44,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
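The CompactionConfiguration dump repeated for every column family of hbase:meta (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) maps onto the usual compaction tuning knobs. A sketch with what I believe are the matching keys; the values are just the defaults echoed in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // files [minFilesToCompact:3, maxFilesToCompact:10) in the log
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // ratio 1.2 during normal hours, 5.0 off-peak
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // major period 604800000 ms (7 days) with 0.5 jitter
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        }
    }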
2024-11-13T20:02:44,217 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T20:02:44,218 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740 2024-11-13T20:02:44,221 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740 2024-11-13T20:02:44,223 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T20:02:44,223 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T20:02:44,224 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T20:02:44,227 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T20:02:44,228 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62584254, jitterRate=-0.06742194294929504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T20:02:44,229 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T20:02:44,232 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731528164204Writing region info on filesystem at 1731528164205 (+1 ms)Initializing all the Stores at 1731528164207 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528164208 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528164208Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528164208Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528164208Cleaning up temporary data from old regions at 1731528164223 (+15 ms)Running coprocessor post-open hooks at 1731528164229 (+6 ms)Region opened successfully at 1731528164232 (+3 ms) 2024-11-13T20:02:44,239 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731528164105 2024-11-13T20:02:44,249 DEBUG [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T20:02:44,250 INFO [RS_OPEN_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T20:02:44,251 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:02:44,253 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 54d0784d1943,33333,1731528161197, state=OPEN 2024-11-13T20:02:44,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T20:02:44,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T20:02:44,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T20:02:44,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T20:02:44,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:44,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:44,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:44,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T20:02:44,256 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=54d0784d1943,33333,1731528161197 2024-11-13T20:02:44,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T20:02:44,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=54d0784d1943,33333,1731528161197 in 327 msec 2024-11-13T20:02:44,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T20:02:44,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1070 sec 2024-11-13T20:02:44,275 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T20:02:44,275 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T20:02:44,299 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:44,301 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:44,324 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:44,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:44,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6460 sec 2024-11-13T20:02:44,354 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731528164354, completionTime=-1 2024-11-13T20:02:44,358 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-13T20:02:44,358 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
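Once hbase:meta is OPEN on 54d0784d1943,33333, clients resolve its location through the connection registry, exactly as PEWorker-3 does above before creating the namespaces. A client-side sketch that performs the same lookup; the quorum and client port are the values from this log and would differ on another cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class LocateMeta {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ZooKeeper endpoint taken from the log above.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "62658");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                // Should report the server the master assigned meta to (port 33333 in this run).
                System.out.println("hbase:meta is served by " + loc.getServerName());
            }
        }
    }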
2024-11-13T20:02:44,392 INFO [master/54d0784d1943:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-13T20:02:44,392 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731528224392 2024-11-13T20:02:44,393 INFO [master/54d0784d1943:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731528284393 2024-11-13T20:02:44,393 INFO [master/54d0784d1943:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 34 msec 2024-11-13T20:02:44,394 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:02:44,403 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,404 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,404 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,406 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-54d0784d1943:34357, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,406 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,407 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T20:02:44,416 DEBUG [master/54d0784d1943:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T20:02:44,448 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.048sec 2024-11-13T20:02:44,459 INFO [master/54d0784d1943:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T20:02:44,461 INFO [master/54d0784d1943:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T20:02:44,462 INFO [master/54d0784d1943:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T20:02:44,463 INFO [master/54d0784d1943:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
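The master has completed initialization and scheduled its BalancerChore (every 300000 ms) and RegionNormalizerChore. For reference, a hedged sketch of the Admin calls that toggle or trigger the same work on demand, assuming the balancerSwitch/balance/normalize methods of the public Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerControls {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // The chores above run on a schedule; the same work can be requested directly.
                admin.balancerSwitch(true, false);          // enable the balancer, do not wait
                boolean ranBalancer = admin.balance();      // ask the master for one balance pass now
                boolean ranNormalizer = admin.normalize();  // same idea for the region normalizer
                System.out.println("balance=" + ranBalancer + " normalize=" + ranNormalizer);
            }
        }
    }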
2024-11-13T20:02:44,463 INFO [master/54d0784d1943:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T20:02:44,464 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T20:02:44,464 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T20:02:44,501 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T20:02:44,504 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 54d0784d1943,34357,1731528160092 2024-11-13T20:02:44,508 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3769f247 2024-11-13T20:02:44,511 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T20:02:44,515 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43395, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T20:02:44,556 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T20:02:44,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-13T20:02:44,573 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:02:44,574 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:44,575 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-13T20:02:44,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30a4e35d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:44,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T20:02:44,584 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:02:44,596 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T20:02:44,596 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T20:02:44,617 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:02:44,627 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:02:44,649 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:02:44,653 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:02:44,654 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:02:44,654 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@600a067b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:44,654 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:02:44,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:02:44,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741837_1013 (size=349) 2024-11-13T20:02:44,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741837_1013 (size=349) 2024-11-13T20:02:44,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741837_1013 (size=349) 2024-11-13T20:02:44,671 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:44,673 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:02:44,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@144ce549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:44,677 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:44,679 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f7446bb3f02b60d610e520d40f8cc773, NAME => 'hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:44,691 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:44,691 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:44,695 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:44,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=54d0784d1943,34357,1731528160092 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/test.cache.data in system properties and HBase conf 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir in system properties and HBase conf 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T20:02:44,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T20:02:44,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/nfs.dump.dir in system properties and HBase conf 2024-11-13T20:02:44,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir in system properties and HBase conf 2024-11-13T20:02:44,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T20:02:44,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 
2024-11-13T20:02:44,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T20:02:44,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T20:02:44,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741838_1014 (size=36) 2024-11-13T20:02:44,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741838_1014 (size=36) 2024-11-13T20:02:44,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741838_1014 (size=36) 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing f7446bb3f02b60d610e520d40f8cc773, disabling compactions & flushes 2024-11-13T20:02:44,743 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. after waiting 0 ms 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:44,743 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:44,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for f7446bb3f02b60d610e520d40f8cc773: Waiting for close lock at 1731528164743Disabling compacts and flushes for region at 1731528164743Disabling writes for close at 1731528164743Writing region close event to WAL at 1731528164743Closed at 1731528164743 2024-11-13T20:02:44,749 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:02:44,760 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1731528164755"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528164755"}]},"ts":"1731528164755"} 2024-11-13T20:02:44,768 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
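The CreateTableProcedure entries above spell out the full descriptor used for hbase:acl: a single column family 'l' with VERSIONS => '1', IN_MEMORY => 'true', BLOOMFILTER => 'NONE', an 8 KB block size, and the DEFAULT store file tracker. For reference, an equivalent descriptor can be expressed through the public HBase client API; the sketch below is illustrative only (the table name and connection setup are placeholders, and this is not the internal code path the master takes when it bootstraps the ACL table).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AclLikeTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family 'l' mirroring the attributes printed in the log:
          // one version, kept in memory, no bloom filter, 8192-byte blocks.
          ColumnFamilyDescriptor l = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("l"))
              .setMaxVersions(1)
              .setInMemory(true)
              .setBloomFilterType(BloomType.NONE)
              .setBlocksize(8192)
              .build();
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("acl_like_demo"))  // placeholder name
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .setColumnFamily(l)
              .build();
          admin.createTable(desc);
        }
      }
    }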
2024-11-13T20:02:44,771 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:02:44,775 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528164772"}]},"ts":"1731528164772"} 2024-11-13T20:02:44,781 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-13T20:02:44,782 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:02:44,785 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:02:44,785 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:02:44,785 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:02:44,786 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:02:44,786 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:02:44,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=f7446bb3f02b60d610e520d40f8cc773, ASSIGN}] 2024-11-13T20:02:44,797 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=f7446bb3f02b60d610e520d40f8cc773, ASSIGN 2024-11-13T20:02:44,800 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=f7446bb3f02b60d610e520d40f8cc773, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:02:44,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T20:02:44,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741839_1015 (size=592039) 2024-11-13T20:02:44,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741839_1015 (size=592039) 2024-11-13T20:02:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741839_1015 (size=592039) 2024-11-13T20:02:44,962 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 1 
regions. 1 retained the pre-restart assignment. 2024-11-13T20:02:44,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f7446bb3f02b60d610e520d40f8cc773, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:02:44,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=f7446bb3f02b60d610e520d40f8cc773, ASSIGN because future has completed 2024-11-13T20:02:44,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7446bb3f02b60d610e520d40f8cc773, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:02:45,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T20:02:45,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T20:02:45,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T20:02:45,138 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:02:45,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40673, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:02:45,165 INFO [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:45,166 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f7446bb3f02b60d610e520d40f8cc773, NAME => 'hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.', STARTKEY => '', ENDKEY => ''} 2024-11-13T20:02:45,166 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. service=AccessControlService 2024-11-13T20:02:45,166 INFO [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
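The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines show that this cluster runs with authorization enabled. Outside the test harness that normally comes from the documented configuration keys below; this is a minimal sketch of those settings in code form, not a reproduction of what SecureTestUtil wires up internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
      // Returns a configuration with the AccessController installed as a
      // master, region and region-server coprocessor.
      public static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }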
2024-11-13T20:02:45,167 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,167 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:45,167 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,167 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,174 INFO [StoreOpener-f7446bb3f02b60d610e520d40f8cc773-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,178 INFO [StoreOpener-f7446bb3f02b60d610e520d40f8cc773-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7446bb3f02b60d610e520d40f8cc773 columnFamilyName l 2024-11-13T20:02:45,178 DEBUG [StoreOpener-f7446bb3f02b60d610e520d40f8cc773-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:45,180 INFO [StoreOpener-f7446bb3f02b60d610e520d40f8cc773-1 {}] regionserver.HStore(327): Store=f7446bb3f02b60d610e520d40f8cc773/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:45,184 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,186 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,187 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,189 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,189 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,194 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,210 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:02:45,211 INFO [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened f7446bb3f02b60d610e520d40f8cc773; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62317662, jitterRate=-0.07139447331428528}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:02:45,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:02:45,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f7446bb3f02b60d610e520d40f8cc773: Running coprocessor pre-open hook at 1731528165167Writing region info on filesystem at 1731528165167Initializing all the Stores at 1731528165169 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731528165169Cleaning up temporary data from old regions at 1731528165189 (+20 ms)Running coprocessor post-open hooks at 1731528165212 (+23 ms)Region opened successfully at 1731528165214 (+2 ms) 2024-11-13T20:02:45,218 INFO [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., pid=6, masterSystemTime=1731528165137 2024-11-13T20:02:45,223 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:02:45,223 INFO [RS_OPEN_PRIORITY_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 
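The open journal above ends with "Region opened successfully", after which the hbase:acl region is served from 54d0784d1943,36439. A client can observe the same placement through the RegionLocator API; the following is a small illustrative sketch (connection setup assumed, output format arbitrary).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Encoded region name and hosting region server, matching the
            // "Post open deploy tasks" entries in the log.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }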
2024-11-13T20:02:45,224 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f7446bb3f02b60d610e520d40f8cc773, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:02:45,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T20:02:45,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7446bb3f02b60d610e520d40f8cc773, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:02:45,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T20:02:45,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f7446bb3f02b60d610e520d40f8cc773, server=54d0784d1943,36439,1731528160987 in 267 msec 2024-11-13T20:02:45,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T20:02:45,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=f7446bb3f02b60d610e520d40f8cc773, ASSIGN in 469 msec 2024-11-13T20:02:45,267 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:02:45,267 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528165267"}]},"ts":"1731528165267"} 2024-11-13T20:02:45,271 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-13T20:02:45,279 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:02:45,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 719 msec 2024-11-13T20:02:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T20:02:45,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-13T20:02:45,796 DEBUG [master/54d0784d1943:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T20:02:45,797 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T20:02:45,798 INFO [master/54d0784d1943:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=54d0784d1943,34357,1731528160092-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
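The repeated "Checking to see if procedure is done pid=4" entries are the master answering the client, which polls until the CreateTableProcedure reaches SUCCESS; the "Operation: CREATE, Table Name: hbase:acl completed" line is that poll resolving. In client code the same pattern surfaces as a Future. A hedged sketch, assuming the two-argument createTableAsync overload of Admin; the table and family names are placeholders.

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncCreateSketch {
      // Submits the create to the master and blocks until the procedure
      // finishes -- the same "is procedure done?" polling visible above.
      static void createAndWait(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))           // placeholder
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        byte[][] splits = { Bytes.toBytes("1") };                  // optional pre-split
        Future<Void> pending = admin.createTableAsync(desc, splits);
        pending.get();
      }
    }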
2024-11-13T20:02:47,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741828_1004 (size=1189) 2024-11-13T20:02:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741831_1007 (size=1321) 2024-11-13T20:02:47,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741829_1005 (size=34) 2024-11-13T20:02:47,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:49,767 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:02:49,834 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-13T20:02:49,836 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T20:02:50,133 WARN [Thread-374 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:50,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:02:50,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-13T20:02:50,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T20:02:50,609 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T20:02:50,610 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-13T20:02:50,610 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-13T20:02:50,611 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:02:50,611 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-13T20:02:50,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 
2024-11-13T20:02:50,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-13T20:02:50,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:02:50,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-13T20:02:50,612 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T20:02:50,612 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T20:02:50,613 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T20:02:50,613 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T20:02:50,784 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:50,785 WARN [Thread-374 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T20:02:50,786 INFO [Thread-374 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:50,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:50,798 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:50,798 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T20:02:50,800 INFO [Thread-374 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:50,800 INFO [Thread-374 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:50,800 INFO [Thread-374 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T20:02:50,802 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:50,806 INFO [Thread-374 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@153516ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:50,807 INFO [Thread-374 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7979e0cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:50,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4adcba8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:50,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@295ba610{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:51,123 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-11-13T20:02:51,123 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-11-13T20:02:51,123 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T20:02:51,126 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T20:02:51,226 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:51,578 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:52,303 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-11-13T20:02:52,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54649c0f{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-36885-hadoop-yarn-common-3_4_1_jar-_-any-15357303923203052069/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-13T20:02:52,363 INFO [Thread-374 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@38a3a1ea{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-42569-hadoop-yarn-common-3_4_1_jar-_-any-1159700531729879455/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-13T20:02:52,364 INFO [Thread-374 {}] server.AbstractConnector(333): Started ServerConnector@307e6889{HTTP/1.1, (http/1.1)}{localhost:42569} 2024-11-13T20:02:52,364 INFO [Thread-374 {}] server.Server(415): Started @20864ms 2024-11-13T20:02:52,370 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35ae0374{HTTP/1.1, (http/1.1)}{localhost:36885} 2024-11-13T20:02:52,370 INFO [Time-limited test {}] server.Server(415): Started @20869ms 2024-11-13T20:02:52,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741841_1017 (size=5) 2024-11-13T20:02:52,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741841_1017 (size=5) 2024-11-13T20:02:52,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741841_1017 (size=5) 2024-11-13T20:02:54,266 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-13T20:02:54,272 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:54,312 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T20:02:54,313 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:54,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:54,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:54,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T20:02:54,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:54,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57fdba5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:54,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e3f4c0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:54,468 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-13T20:02:54,468 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T20:02:54,468 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-13T20:02:54,468 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T20:02:54,485 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:54,524 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:54,879 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:54,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6002d397{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-45349-hadoop-yarn-common-3_4_1_jar-_-any-17461412073015697703/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T20:02:54,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f49b64c{HTTP/1.1, (http/1.1)}{localhost:45349} 2024-11-13T20:02:54,904 INFO [Time-limited test {}] server.Server(415): Started @23403ms 2024-11-13T20:02:55,365 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-13T20:02:55,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:55,400 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T20:02:55,411 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T20:02:55,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T20:02:55,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T20:02:55,441 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T20:02:55,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T20:02:55,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47180253{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,AVAILABLE} 2024-11-13T20:02:55,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68343f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T20:02:55,605 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-13T20:02:55,607 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T20:02:55,607 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-13T20:02:55,607 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T20:02:55,619 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:55,632 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:55,886 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T20:02:55,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@79612663{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/java.io.tmpdir/jetty-localhost-40653-hadoop-yarn-common-3_4_1_jar-_-any-5370282999097733691/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T20:02:55,908 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f055f38{HTTP/1.1, (http/1.1)}{localhost:40653} 2024-11-13T20:02:55,917 INFO [Time-limited test {}] server.Server(415): Started @24408ms 2024-11-13T20:02:55,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-13T20:02:55,998 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:02:56,029 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=706, OpenFileDescriptor=747, MaxFileDescriptor=1048576, SystemLoadAverage=868, ProcessCount=13, AvailableMemoryMB=2678 2024-11-13T20:02:56,031 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=706 is superior to 500 2024-11-13T20:02:56,036 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T20:02:56,042 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 54d0784d1943,34357,1731528160092 2024-11-13T20:02:56,042 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67da4f42 2024-11-13T20:02:56,042 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T20:02:56,045 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T20:02:56,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:02:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:02:56,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:02:56,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: 
"default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-13T20:02:56,064 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:02:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T20:02:56,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741842_1018 (size=442) 2024-11-13T20:02:56,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741842_1018 (size=442) 2024-11-13T20:02:56,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741842_1018 (size=442) 2024-11-13T20:02:56,109 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d102669d87efd9797c19fb08202748ad, NAME => 'testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:56,122 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 79ca5fb4b138c05dc4584ee433216f59, NAME => 'testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:02:56,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741843_1019 (size=67) 2024-11-13T20:02:56,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741843_1019 (size=67) 2024-11-13T20:02:56,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T20:02:56,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741843_1019 (size=67) 2024-11-13T20:02:56,194 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:56,194 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing d102669d87efd9797c19fb08202748ad, disabling compactions & flushes 2024-11-13T20:02:56,194 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,194 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,194 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. after waiting 0 ms 2024-11-13T20:02:56,194 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,194 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,194 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for d102669d87efd9797c19fb08202748ad: Waiting for close lock at 1731528176194Disabling compacts and flushes for region at 1731528176194Disabling writes for close at 1731528176194Writing region close event to WAL at 1731528176194Closed at 1731528176194 2024-11-13T20:02:56,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741844_1020 (size=67) 2024-11-13T20:02:56,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741844_1020 (size=67) 2024-11-13T20:02:56,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741844_1020 (size=67) 2024-11-13T20:02:56,258 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:56,258 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 79ca5fb4b138c05dc4584ee433216f59, disabling compactions & flushes 2024-11-13T20:02:56,258 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:56,258 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 
2024-11-13T20:02:56,258 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. after waiting 0 ms 2024-11-13T20:02:56,258 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:56,258 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:56,259 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 79ca5fb4b138c05dc4584ee433216f59: Waiting for close lock at 1731528176258Disabling compacts and flushes for region at 1731528176258Disabling writes for close at 1731528176258Writing region close event to WAL at 1731528176258Closed at 1731528176258 2024-11-13T20:02:56,277 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:02:56,278 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731528176277"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528176277"}]},"ts":"1731528176277"} 2024-11-13T20:02:56,278 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731528176277"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528176277"}]},"ts":"1731528176277"} 2024-11-13T20:02:56,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
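The two RegionOpenAndInit entries above initialize the descriptor for testtb-testExportWithTargetName: one family 'cf' with IS_MOB => 'true' and MOB_THRESHOLD => '0', split at row key '1' so that the table gets the two regions ('' to '1' and '1' to ''). The MOB attributes map directly onto ColumnFamilyDescriptorBuilder options; the sketch below is illustrative, restating what the log already shows rather than the test's own helper code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      static void create(Admin admin) throws Exception {
        // MOB_THRESHOLD=0 pushes every cell written to 'cf' into a MOB file,
        // which is exactly what a MOB snapshot-export test wants to exercise.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L)
            .setMaxVersions(1)
            .build();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
            .setRegionReplication(1)
            .setColumnFamily(cf)
            .build();
        // Pre-split at '1', yielding the two regions that the ASSIGN
        // procedures below distribute across the region servers.
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }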
2024-11-13T20:02:56,348 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:02:56,348 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528176348"}]},"ts":"1731528176348"} 2024-11-13T20:02:56,368 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-13T20:02:56,369 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:02:56,377 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:02:56,377 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:02:56,378 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:02:56,378 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:02:56,378 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:02:56,378 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:02:56,378 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:02:56,378 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:02:56,378 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:02:56,378 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:02:56,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, ASSIGN}] 2024-11-13T20:02:56,382 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, ASSIGN 2024-11-13T20:02:56,382 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, ASSIGN 2024-11-13T20:02:56,384 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:02:56,384 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:02:56,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T20:02:56,535 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T20:02:56,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d102669d87efd9797c19fb08202748ad, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:02:56,537 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=79ca5fb4b138c05dc4584ee433216f59, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:02:56,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, ASSIGN because future has completed 2024-11-13T20:02:56,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:02:56,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, ASSIGN because future has completed 2024-11-13T20:02:56,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:02:56,706 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:02:56,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T20:02:56,729 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57875, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:02:56,745 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 
2024-11-13T20:02:56,745 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 79ca5fb4b138c05dc4584ee433216f59, NAME => 'testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:02:56,746 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. service=AccessControlService 2024-11-13T20:02:56,746 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:02:56,746 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,746 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:56,747 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,747 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,759 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,760 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => d102669d87efd9797c19fb08202748ad, NAME => 'testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:02:56,760 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. service=AccessControlService 2024-11-13T20:02:56,761 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:02:56,761 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,762 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:02:56,763 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,763 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,763 INFO [StoreOpener-79ca5fb4b138c05dc4584ee433216f59-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,768 INFO [StoreOpener-d102669d87efd9797c19fb08202748ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,777 INFO [StoreOpener-79ca5fb4b138c05dc4584ee433216f59-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 79ca5fb4b138c05dc4584ee433216f59 columnFamilyName cf 2024-11-13T20:02:56,777 INFO [StoreOpener-d102669d87efd9797c19fb08202748ad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d102669d87efd9797c19fb08202748ad columnFamilyName cf 2024-11-13T20:02:56,787 DEBUG [StoreOpener-d102669d87efd9797c19fb08202748ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:56,788 DEBUG [StoreOpener-79ca5fb4b138c05dc4584ee433216f59-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:56,788 INFO [StoreOpener-d102669d87efd9797c19fb08202748ad-1 {}] regionserver.HStore(327): Store=d102669d87efd9797c19fb08202748ad/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:56,792 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,794 INFO [StoreOpener-79ca5fb4b138c05dc4584ee433216f59-1 {}] regionserver.HStore(327): Store=79ca5fb4b138c05dc4584ee433216f59/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:02:56,794 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,795 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,797 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,797 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,797 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,798 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,798 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,799 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,799 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,807 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 
{event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,809 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,829 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:02:56,830 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:02:56,831 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 79ca5fb4b138c05dc4584ee433216f59; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60321889, jitterRate=-0.1011338084936142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:02:56,831 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened d102669d87efd9797c19fb08202748ad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70798069, jitterRate=0.05497343838214874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:02:56,831 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:56,831 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:56,833 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for d102669d87efd9797c19fb08202748ad: Running coprocessor pre-open hook at 1731528176763Writing region info on filesystem at 1731528176763Initializing all the Stores at 1731528176766 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528176766Cleaning up temporary data from old regions at 1731528176799 (+33 ms)Running coprocessor post-open hooks at 1731528176831 (+32 ms)Region opened successfully at 1731528176833 (+2 ms) 2024-11-13T20:02:56,833 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 79ca5fb4b138c05dc4584ee433216f59: Running coprocessor pre-open hook at 1731528176747Writing region info on 
filesystem at 1731528176747Initializing all the Stores at 1731528176749 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528176753 (+4 ms)Cleaning up temporary data from old regions at 1731528176798 (+45 ms)Running coprocessor post-open hooks at 1731528176831 (+33 ms)Region opened successfully at 1731528176833 (+2 ms) 2024-11-13T20:02:56,836 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad., pid=10, masterSystemTime=1731528176705 2024-11-13T20:02:56,839 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59., pid=11, masterSystemTime=1731528176709 2024-11-13T20:02:56,840 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,840 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:56,841 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d102669d87efd9797c19fb08202748ad, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:02:56,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:02:56,849 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:56,849 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:56,858 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=79ca5fb4b138c05dc4584ee433216f59, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:02:56,866 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=54d0784d1943,36439,1731528160987, table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-13T20:02:56,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:02:56,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-13T20:02:56,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273 in 324 msec 2024-11-13T20:02:56,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-13T20:02:56,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987 in 324 msec 2024-11-13T20:02:56,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, ASSIGN in 520 msec 2024-11-13T20:02:56,911 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:02:56,911 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528176911"}]},"ts":"1731528176911"} 2024-11-13T20:02:56,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-13T20:02:56,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, ASSIGN in 525 msec 2024-11-13T20:02:56,919 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-13T20:02:56,923 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:02:56,928 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-13T20:02:56,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:02:56,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:56,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42381, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:02:56,972 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:56,972 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:56,973 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:56,999 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60037, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:02:57,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,008 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38771, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,011 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T20:02:57,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T20:02:57,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T20:02:57,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T20:02:57,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T20:02:57,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T20:02:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:02:57,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T20:02:57,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T20:02:57,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 1.0220 sec 2024-11-13T20:02:57,082 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T20:02:57,089 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T20:02:57,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T20:02:57,219 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T20:02:57,224 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:57,251 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-13T20:02:57,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 
2024-11-13T20:02:57,253 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:02:57,260 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:57,279 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:57,292 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,296 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34088, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,300 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,326 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:57,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T20:02:57,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528177341 (current time:1731528177341). 
2024-11-13T20:02:57,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:02:57,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-13T20:02:57,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:02:57,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4528de3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:02:57,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:02:57,348 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:02:57,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:02:57,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:02:57,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71450daf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:02:57,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:02:57,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,352 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:02:57,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ece9964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:57,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:57,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 2024-11-13T20:02:57,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:02:57,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,372 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:02:57,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142f9726, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:02:57,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:02:57,376 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:02:57,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:02:57,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:02:57,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2616fd29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:02:57,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:02:57,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,381 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:02:57,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12523445, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:57,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:57,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,394 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:02:57,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:02:57,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:57,403 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:57,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 2024-11-13T20:02:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:02:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:57,406 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:02:57,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T20:02:57,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:02:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T20:02:57,429 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:02:57,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-13T20:02:57,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T20:02:57,436 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:02:57,469 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:02:57,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741845_1021 (size=167) 2024-11-13T20:02:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741845_1021 (size=167) 2024-11-13T20:02:57,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741845_1021 (size=167) 2024-11-13T20:02:57,523 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:02:57,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59}] 2024-11-13T20:02:57,541 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:57,542 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:57,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T20:02:57,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-13T20:02:57,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-13T20:02:57,710 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:57,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:57,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for d102669d87efd9797c19fb08202748ad: 2024-11-13T20:02:57,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. for emptySnaptb0-testExportWithTargetName completed. 2024-11-13T20:02:57,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-13T20:02:57,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 79ca5fb4b138c05dc4584ee433216f59: 2024-11-13T20:02:57,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. for emptySnaptb0-testExportWithTargetName completed. 
2024-11-13T20:02:57,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-13T20:02:57,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:02:57,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:02:57,725 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:02:57,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:02:57,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T20:02:57,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741846_1022 (size=70) 2024-11-13T20:02:57,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741846_1022 (size=70) 2024-11-13T20:02:57,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741846_1022 (size=70) 2024-11-13T20:02:57,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 
2024-11-13T20:02:57,828 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-13T20:02:57,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-13T20:02:57,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:57,837 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:57,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad in 314 msec 2024-11-13T20:02:57,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741847_1023 (size=70) 2024-11-13T20:02:57,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741847_1023 (size=70) 2024-11-13T20:02:57,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:57,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-13T20:02:57,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741847_1023 (size=70) 2024-11-13T20:02:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-13T20:02:57,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:57,887 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:57,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-13T20:02:57,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 in 364 msec 2024-11-13T20:02:57,905 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:02:57,910 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ 
ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:02:57,929 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-13T20:02:57,929 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:02:57,930 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:57,931 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:02:58,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741848_1024 (size=62) 2024-11-13T20:02:58,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741848_1024 (size=62) 2024-11-13T20:02:58,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741848_1024 (size=62) 2024-11-13T20:02:58,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:02:58,065 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-13T20:02:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T20:02:58,077 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-13T20:02:58,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741849_1025 (size=649) 2024-11-13T20:02:58,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741849_1025 (size=649) 2024-11-13T20:02:58,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741849_1025 (size=649) 2024-11-13T20:02:58,250 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:02:58,291 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:02:58,293 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is 
done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-13T20:02:58,298 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:02:58,298 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-13T20:02:58,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 877 msec 2024-11-13T20:02:58,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T20:02:58,577 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T20:02:58,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:02:58,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:02:58,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:58,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-13T20:02:58,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 
2024-11-13T20:02:58,636 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:02:58,643 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:58,683 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:58,698 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T20:02:58,703 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T20:02:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528178703 (current time:1731528178703). 2024-11-13T20:02:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:02:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-13T20:02:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:02:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5992f0db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:02:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:02:58,709 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:02:58,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:02:58,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:02:58,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22945143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-13T20:02:58,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:02:58,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:02:58,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,712 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:02:58,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e40fc8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:58,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:58,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:58,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:58,727 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:58,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 
2024-11-13T20:02:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:02:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,735 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:02:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b45de66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:02:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:02:58,741 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:02:58,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:02:58,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:02:58,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f9792dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:58,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:02:58,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:02:58,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,745 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:02:58,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521e1d13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:02:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:02:58,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:02:58,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:58,754 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:58,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:02:58,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:02:58,760 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39752, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:02:58,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 
2024-11-13T20:02:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:02:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:02:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T20:02:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:02:58,765 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:02:58,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T20:02:58,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-13T20:02:58,774 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:02:58,777 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:02:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:02:58,783 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:02:58,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741850_1026 (size=162) 2024-11-13T20:02:58,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741850_1026 (size=162) 2024-11-13T20:02:58,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741850_1026 (size=162) 2024-11-13T20:02:58,859 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:02:58,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59}] 2024-11-13T20:02:58,863 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:58,864 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:58,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:02:59,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-13T20:02:59,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-13T20:02:59,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:02:59,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:02:59,043 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing d102669d87efd9797c19fb08202748ad 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-13T20:02:59,044 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 79ca5fb4b138c05dc4584ee433216f59 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-13T20:02:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:02:59,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 is 71, key is 1163d0ba5f3ba8741b28d1622a47139a/cf:q/1731528178616/Put/seqid=0 2024-11-13T20:02:59,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad is 71, key is 084620cf2994b5f7dcd4f83237232dba/cf:q/1731528178606/Put/seqid=0 2024-11-13T20:02:59,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741852_1028 (size=8101) 2024-11-13T20:02:59,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741852_1028 (size=8101) 2024-11-13T20:02:59,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741852_1028 (size=8101) 2024-11-13T20:02:59,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:59,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:02:59,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741851_1027 (size=5171) 2024-11-13T20:02:59,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741851_1027 (size=5171) 2024-11-13T20:02:59,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:02:59,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741851_1027 (size=5171) 2024-11-13T20:02:59,567 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:02:59,567 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:59,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/.tmp/cf/32a23abe656b454883c916bfbca4a336, store: [table=testtb-testExportWithTargetName family=cf region=d102669d87efd9797c19fb08202748ad] 2024-11-13T20:02:59,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/.tmp/cf/07cdbc0763a547f2999f27b4dd0edc04, store: [table=testtb-testExportWithTargetName family=cf region=79ca5fb4b138c05dc4584ee433216f59] 2024-11-13T20:02:59,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/.tmp/cf/07cdbc0763a547f2999f27b4dd0edc04 is 208, key is 18df009cb3627d245fd5a6c5e5d9df114/cf:q/1731528178616/Put/seqid=0 2024-11-13T20:02:59,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/.tmp/cf/32a23abe656b454883c916bfbca4a336 is 208, key is 0c50b72e3c29f54adb074936a1dbaf806/cf:q/1731528178606/Put/seqid=0 2024-11-13T20:02:59,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741853_1029 (size=14745) 2024-11-13T20:02:59,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741853_1029 (size=14745) 2024-11-13T20:02:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741853_1029 (size=14745) 2024-11-13T20:02:59,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/.tmp/cf/07cdbc0763a547f2999f27b4dd0edc04 2024-11-13T20:02:59,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741854_1030 (size=6116) 2024-11-13T20:02:59,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741854_1030 (size=6116) 2024-11-13T20:02:59,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741854_1030 (size=6116) 2024-11-13T20:02:59,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/.tmp/cf/32a23abe656b454883c916bfbca4a336 2024-11-13T20:02:59,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/.tmp/cf/32a23abe656b454883c916bfbca4a336 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336 2024-11-13T20:02:59,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/.tmp/cf/07cdbc0763a547f2999f27b4dd0edc04 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04 2024-11-13T20:02:59,740 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04, entries=46, sequenceid=6, filesize=14.4 K 2024-11-13T20:02:59,764 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 79ca5fb4b138c05dc4584ee433216f59 in 706ms, sequenceid=6, compaction requested=false 2024-11-13T20:02:59,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-13T20:02:59,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 79ca5fb4b138c05dc4584ee433216f59: 2024-11-13T20:02:59,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. for snaptb0-testExportWithTargetName completed. 2024-11-13T20:02:59,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-13T20:02:59,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:02:59,766 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04] hfiles 2024-11-13T20:02:59,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336, entries=4, sequenceid=6, filesize=6.0 K 2024-11-13T20:02:59,771 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for d102669d87efd9797c19fb08202748ad in 748ms, sequenceid=6, compaction requested=false 2024-11-13T20:02:59,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for d102669d87efd9797c19fb08202748ad: 2024-11-13T20:02:59,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. for snaptb0-testExportWithTargetName completed. 2024-11-13T20:02:59,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-13T20:02:59,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:02:59,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336] hfiles 2024-11-13T20:02:59,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04 for snapshot=snaptb0-testExportWithTargetName 2024-11-13T20:02:59,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336 for snapshot=snaptb0-testExportWithTargetName 2024-11-13T20:02:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:02:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741855_1031 (size=109) 2024-11-13T20:02:59,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741855_1031 (size=109) 2024-11-13T20:02:59,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741855_1031 (size=109) 2024-11-13T20:02:59,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 
2024-11-13T20:02:59,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-13T20:02:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-13T20:02:59,952 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:59,953 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad 2024-11-13T20:02:59,960 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d102669d87efd9797c19fb08202748ad in 1.0980 sec 2024-11-13T20:03:00,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741856_1032 (size=109) 2024-11-13T20:03:00,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741856_1032 (size=109) 2024-11-13T20:03:00,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741856_1032 (size=109) 2024-11-13T20:03:00,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 
2024-11-13T20:03:00,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-13T20:03:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-13T20:03:00,003 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:00,003 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:00,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-13T20:03:00,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 79ca5fb4b138c05dc4584ee433216f59 in 1.1470 sec 2024-11-13T20:03:00,013 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:00,014 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:00,050 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:00,050 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:00,050 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:00,056 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad] hfiles 2024-11-13T20:03:00,056 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:00,056 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:00,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741857_1033 (size=293) 2024-11-13T20:03:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741857_1033 (size=293) 2024-11-13T20:03:00,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741857_1033 (size=293) 2024-11-13T20:03:00,131 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:00,131 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-13T20:03:00,133 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-13T20:03:00,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741858_1034 (size=959) 2024-11-13T20:03:00,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741858_1034 (size=959) 2024-11-13T20:03:00,269 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741858_1034 (size=959) 2024-11-13T20:03:00,286 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:00,319 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:00,321 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-13T20:03:00,325 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:00,325 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-13T20:03:00,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.5600 sec 2024-11-13T20:03:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-13T20:03:00,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-13T20:03:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T20:03:00,926 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T20:03:00,927 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927 2024-11-13T20:03:00,928 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927, 
rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:01,016 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:01,017 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-13T20:03:01,023 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:03:01,036 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-13T20:03:01,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741860_1036 (size=959) 2024-11-13T20:03:01,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741860_1036 (size=959) 2024-11-13T20:03:01,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741860_1036 (size=959) 2024-11-13T20:03:01,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741859_1035 (size=162) 2024-11-13T20:03:01,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741859_1035 (size=162) 2024-11-13T20:03:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741859_1035 (size=162) 2024-11-13T20:03:01,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741861_1037 (size=154) 2024-11-13T20:03:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741861_1037 (size=154) 2024-11-13T20:03:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741861_1037 (size=154) 2024-11-13T20:03:01,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:01,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:01,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,295 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:03:02,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-4057885653395168920.jar 2024-11-13T20:03:02,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-3893681724788325671.jar 2024-11-13T20:03:02,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:02,775 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:03:02,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:03:02,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:03:02,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:03:02,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:03:02,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:03:02,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:03:02,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:03:02,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:03:02,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:03:02,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:03:02,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:02,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:02,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:02,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:02,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:02,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:02,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:03,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741862_1038 (size=131440) 2024-11-13T20:03:03,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741862_1038 (size=131440) 2024-11-13T20:03:03,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741862_1038 (size=131440) 2024-11-13T20:03:03,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T20:03:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T20:03:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T20:03:03,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741864_1040 (size=1323991) 2024-11-13T20:03:03,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741864_1040 (size=1323991) 2024-11-13T20:03:03,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added 
to blk_1073741864_1040 (size=1323991) 2024-11-13T20:03:03,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741865_1041 (size=903730) 2024-11-13T20:03:03,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741865_1041 (size=903730) 2024-11-13T20:03:03,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741865_1041 (size=903730) 2024-11-13T20:03:03,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741866_1042 (size=8360083) 2024-11-13T20:03:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741866_1042 (size=8360083) 2024-11-13T20:03:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741866_1042 (size=8360083) 2024-11-13T20:03:03,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741867_1043 (size=1877034) 2024-11-13T20:03:03,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741867_1043 (size=1877034) 2024-11-13T20:03:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741867_1043 (size=1877034) 2024-11-13T20:03:04,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741868_1044 (size=77835) 2024-11-13T20:03:04,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741868_1044 (size=77835) 2024-11-13T20:03:04,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741868_1044 (size=77835) 2024-11-13T20:03:04,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741869_1045 (size=30949) 2024-11-13T20:03:04,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741869_1045 (size=30949) 2024-11-13T20:03:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741869_1045 (size=30949) 2024-11-13T20:03:04,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741870_1046 (size=1597327) 2024-11-13T20:03:04,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741870_1046 (size=1597327) 2024-11-13T20:03:04,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741870_1046 (size=1597327) 2024-11-13T20:03:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741871_1047 (size=4695811) 2024-11-13T20:03:04,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38925 is added to blk_1073741871_1047 (size=4695811) 2024-11-13T20:03:04,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741871_1047 (size=4695811) 2024-11-13T20:03:04,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741872_1048 (size=232957) 2024-11-13T20:03:04,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741872_1048 (size=232957) 2024-11-13T20:03:04,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741872_1048 (size=232957) 2024-11-13T20:03:04,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741873_1049 (size=440392) 2024-11-13T20:03:04,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741873_1049 (size=440392) 2024-11-13T20:03:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741873_1049 (size=440392) 2024-11-13T20:03:04,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741874_1050 (size=127628) 2024-11-13T20:03:04,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741874_1050 (size=127628) 2024-11-13T20:03:04,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741874_1050 (size=127628) 2024-11-13T20:03:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741875_1051 (size=20406) 2024-11-13T20:03:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741875_1051 (size=20406) 2024-11-13T20:03:04,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741875_1051 (size=20406) 2024-11-13T20:03:05,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741876_1052 (size=5175431) 2024-11-13T20:03:05,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741876_1052 (size=5175431) 2024-11-13T20:03:05,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741876_1052 (size=5175431) 2024-11-13T20:03:05,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741877_1053 (size=217634) 2024-11-13T20:03:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741877_1053 (size=217634) 2024-11-13T20:03:05,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741877_1053 (size=217634) 2024-11-13T20:03:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38925 is added to blk_1073741878_1054 (size=1832290) 2024-11-13T20:03:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741878_1054 (size=1832290) 2024-11-13T20:03:05,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741878_1054 (size=1832290) 2024-11-13T20:03:05,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741879_1055 (size=322274) 2024-11-13T20:03:05,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741879_1055 (size=322274) 2024-11-13T20:03:05,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741879_1055 (size=322274) 2024-11-13T20:03:05,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741880_1056 (size=503880) 2024-11-13T20:03:05,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741880_1056 (size=503880) 2024-11-13T20:03:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741880_1056 (size=503880) 2024-11-13T20:03:05,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741881_1057 (size=6424542) 2024-11-13T20:03:05,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741881_1057 (size=6424542) 2024-11-13T20:03:05,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741881_1057 (size=6424542) 2024-11-13T20:03:05,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741882_1058 (size=29229) 2024-11-13T20:03:05,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741882_1058 (size=29229) 2024-11-13T20:03:05,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741882_1058 (size=29229) 2024-11-13T20:03:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741883_1059 (size=24096) 2024-11-13T20:03:05,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741883_1059 (size=24096) 2024-11-13T20:03:05,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741883_1059 (size=24096) 2024-11-13T20:03:05,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741884_1060 (size=111872) 2024-11-13T20:03:05,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741884_1060 (size=111872) 2024-11-13T20:03:05,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741884_1060 (size=111872) 2024-11-13T20:03:06,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741885_1061 (size=45609) 2024-11-13T20:03:06,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741885_1061 (size=45609) 2024-11-13T20:03:06,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741885_1061 (size=45609) 2024-11-13T20:03:06,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741886_1062 (size=136454) 2024-11-13T20:03:06,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741886_1062 (size=136454) 2024-11-13T20:03:06,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741886_1062 (size=136454) 2024-11-13T20:03:06,479 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:03:06,491 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-13T20:03:06,521 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.3 K 2024-11-13T20:03:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741887_1063 (size=722) 2024-11-13T20:03:06,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741887_1063 (size=722) 2024-11-13T20:03:06,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741887_1063 (size=722) 2024-11-13T20:03:06,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741888_1064 (size=15) 2024-11-13T20:03:06,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741888_1064 (size=15) 2024-11-13T20:03:06,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741888_1064 (size=15) 2024-11-13T20:03:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741889_1065 (size=303731) 2024-11-13T20:03:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741889_1065 (size=303731) 2024-11-13T20:03:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741889_1065 (size=303731) 2024-11-13T20:03:07,456 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:03:07,457 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:03:08,092 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0001_000001 (auth:SIMPLE) from 127.0.0.1:53304 2024-11-13T20:03:08,848 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:03:18,932 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0001_000001 (auth:SIMPLE) from 127.0.0.1:59026 2024-11-13T20:03:19,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741890_1066 (size=349381) 2024-11-13T20:03:19,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741890_1066 (size=349381) 2024-11-13T20:03:19,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741890_1066 (size=349381) 2024-11-13T20:03:21,555 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0001_000001 (auth:SIMPLE) from 127.0.0.1:60612 2024-11-13T20:03:25,802 INFO [master/54d0784d1943:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T20:03:25,803 INFO [master/54d0784d1943:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
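The records above show the test staging the ExportSnapshot MapReduce job: TableMapReduceUtil resolves a jar for every dependency class, the job resources are uploaded, and YARN accepts application_1731528172678_0001. A minimal sketch of how such an export is typically launched programmatically follows; the snapshot name, target name, and destination URI are copied from this log, while the exact ExportSnapshot options (notably -target) and the ToolRunner wiring are assumptions that should be checked against the HBase version in use.

```java
// Hedged sketch: driving an HBase snapshot export similar to the job in this log.
// Names and the destination URI come from the log above; the "-target" option is
// an assumption implied by the test name (testExportWithTargetName).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithTargetName -copy-to <dest> -target testExportWithTargetName
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to",
        "hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927",
        "-target", "testExportWithTargetName"
    });
    System.exit(rc);
  }
}
```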
2024-11-13T20:03:27,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741891_1067 (size=14745) 2024-11-13T20:03:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741891_1067 (size=14745) 2024-11-13T20:03:27,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741891_1067 (size=14745) 2024-11-13T20:03:27,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741892_1068 (size=8101) 2024-11-13T20:03:27,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741892_1068 (size=8101) 2024-11-13T20:03:27,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741892_1068 (size=8101) 2024-11-13T20:03:27,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741893_1069 (size=6116) 2024-11-13T20:03:27,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741893_1069 (size=6116) 2024-11-13T20:03:27,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741893_1069 (size=6116) 2024-11-13T20:03:27,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741894_1070 (size=5171) 2024-11-13T20:03:27,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741894_1070 (size=5171) 2024-11-13T20:03:27,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741894_1070 (size=5171) 2024-11-13T20:03:27,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741895_1071 (size=17461) 2024-11-13T20:03:27,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741895_1071 (size=17461) 2024-11-13T20:03:27,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741895_1071 (size=17461) 2024-11-13T20:03:27,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741896_1072 (size=464) 2024-11-13T20:03:27,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741896_1072 (size=464) 2024-11-13T20:03:27,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741896_1072 (size=464) 2024-11-13T20:03:27,854 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000002/launch_container.sh] 2024-11-13T20:03:27,855 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000002/container_tokens] 2024-11-13T20:03:27,855 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000002/sysfs] 2024-11-13T20:03:28,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741897_1073 (size=17461) 2024-11-13T20:03:28,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741897_1073 (size=17461) 2024-11-13T20:03:28,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741897_1073 (size=17461) 2024-11-13T20:03:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741898_1074 (size=349381) 2024-11-13T20:03:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741898_1074 (size=349381) 2024-11-13T20:03:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741898_1074 (size=349381) 2024-11-13T20:03:30,099 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:03:30,102 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
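After the map tasks finish, ExportSnapshot finalizes the export and re-verifies the exported snapshot's expiration status and integrity; the records that follow show the test listing the exported .snapshotinfo and data.manifest files. A similar spot-check can be done with the plain Hadoop FileSystem API; this sketch assumes the destination layout shown in this log.

```java
// Minimal sketch: confirming the exported snapshot layout that the test lists below
// (.snapshotinfo and data.manifest under .hbase-snapshot/<target name>).
// Paths are taken from this log; only the standard Hadoop FileSystem API is used.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyExportedSnapshotLayout {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43181"), conf);
    Path snapshotDir = new Path(
        "/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927"
        + "/.hbase-snapshot/testExportWithTargetName");
    // Both files must exist for the export to be considered complete.
    boolean ok = fs.exists(new Path(snapshotDir, ".snapshotinfo"))
        && fs.exists(new Path(snapshotDir, "data.manifest"));
    System.out.println("exported snapshot layout present: " + ok);
  }
}
```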
2024-11-13T20:03:30,130 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-13T20:03:30,131 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:03:30,132 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:03:30,133 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-13T20:03:30,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-13T20:03:30,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-13T20:03:30,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/testExportWithTargetName 2024-11-13T20:03:30,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-13T20:03:30,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528180927/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-13T20:03:30,158 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-13T20:03:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T20:03:30,168 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528210167"}]},"ts":"1731528210167"} 2024-11-13T20:03:30,170 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-13T20:03:30,170 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-13T20:03:30,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-13T20:03:30,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, UNASSIGN}] 2024-11-13T20:03:30,179 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, UNASSIGN 2024-11-13T20:03:30,179 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, UNASSIGN 2024-11-13T20:03:30,181 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=79ca5fb4b138c05dc4584ee433216f59, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:03:30,181 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=d102669d87efd9797c19fb08202748ad, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:30,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, UNASSIGN because future has completed 2024-11-13T20:03:30,184 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:30,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:03:30,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, UNASSIGN because future has completed 2024-11-13T20:03:30,188 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:30,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:03:30,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T20:03:30,343 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] 
handler.UnassignRegionHandler(122): Close d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:30,343 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(122): Close 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:30,343 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:30,343 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing d102669d87efd9797c19fb08202748ad, disabling compactions & flushes 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing 79ca5fb4b138c05dc4584ee433216f59, disabling compactions & flushes 2024-11-13T20:03:30,344 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:03:30,344 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. after waiting 0 ms 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. after waiting 0 ms 2024-11-13T20:03:30,344 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 
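The records from pid=18 onward trace a DisableTableProcedure: the master marks the table DISABLING in hbase:meta, schedules CloseTableRegionsProcedure with per-region TransitRegionStateProcedure/CloseRegionProcedure children, and the region servers close d102669d87efd9797c19fb08202748ad and 79ca5fb4b138c05dc4584ee433216f59. On the client side this whole procedure tree is driven by a single Admin call; a minimal sketch, assuming the cluster connection settings are available from the classpath configuration:

```java
// Sketch of the client call behind the DisableTableProcedure records above
// (pid=18 with CloseTableRegionsProcedure / CloseRegionProcedure children).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      if (!admin.isTableDisabled(table)) {
        // Blocks until the disable procedure (as logged above) completes.
        admin.disableTable(table);
      }
    }
  }
}
```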
2024-11-13T20:03:30,354 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:03:30,358 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:30,358 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad. 2024-11-13T20:03:30,359 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for d102669d87efd9797c19fb08202748ad: Waiting for close lock at 1731528210344Running coprocessor pre-close hooks at 1731528210344Disabling compacts and flushes for region at 1731528210344Disabling writes for close at 1731528210344Writing region close event to WAL at 1731528210346 (+2 ms)Running coprocessor post-close hooks at 1731528210355 (+9 ms)Closed at 1731528210358 (+3 ms) 2024-11-13T20:03:30,363 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:30,363 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:03:30,364 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=d102669d87efd9797c19fb08202748ad, regionState=CLOSED 2024-11-13T20:03:30,366 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:30,366 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59. 
2024-11-13T20:03:30,366 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for 79ca5fb4b138c05dc4584ee433216f59: Waiting for close lock at 1731528210344Running coprocessor pre-close hooks at 1731528210344Disabling compacts and flushes for region at 1731528210344Disabling writes for close at 1731528210344Writing region close event to WAL at 1731528210346 (+2 ms)Running coprocessor post-close hooks at 1731528210366 (+20 ms)Closed at 1731528210366 2024-11-13T20:03:30,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:03:30,369 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed 79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:30,371 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=79ca5fb4b138c05dc4584ee433216f59, regionState=CLOSED 2024-11-13T20:03:30,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=20 2024-11-13T20:03:30,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure d102669d87efd9797c19fb08202748ad, server=54d0784d1943,39533,1731528161273 in 181 msec 2024-11-13T20:03:30,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:03:30,378 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d102669d87efd9797c19fb08202748ad, UNASSIGN in 195 msec 2024-11-13T20:03:30,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-13T20:03:30,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure 79ca5fb4b138c05dc4584ee433216f59, server=54d0784d1943,36439,1731528160987 in 194 msec 2024-11-13T20:03:30,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-11-13T20:03:30,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=79ca5fb4b138c05dc4584ee433216f59, UNASSIGN in 206 msec 2024-11-13T20:03:30,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-13T20:03:30,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 220 msec 2024-11-13T20:03:30,403 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528210402"}]},"ts":"1731528210402"} 2024-11-13T20:03:30,405 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-13T20:03:30,405 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-13T20:03:30,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 246 msec 2024-11-13T20:03:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T20:03:30,487 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T20:03:30,492 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-13T20:03:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,500 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-13T20:03:30,502 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-13T20:03:30,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T20:03:30,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T20:03:30,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T20:03:30,515 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:30,515 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:30,520 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:30,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T20:03:30,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T20:03:30,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:30,539 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving 
[FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/recovered.edits] 2024-11-13T20:03:30,539 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/recovered.edits] 2024-11-13T20:03:30,550 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/cf/07cdbc0763a547f2999f27b4dd0edc04 2024-11-13T20:03:30,555 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/cf/32a23abe656b454883c916bfbca4a336 2024-11-13T20:03:30,556 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59/recovered.edits/9.seqid 2024-11-13T20:03:30,557 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:30,560 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad/recovered.edits/9.seqid 2024-11-13T20:03:30,561 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithTargetName/d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:30,561 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-13T20:03:30,568 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-13T20:03:30,569 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-13T20:03:30,585 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b2024111388985d27a17044f08cedc6724a7003bc_79ca5fb4b138c05dc4584ee433216f59 2024-11-13T20:03:30,588 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241113c8cffb70546943559097a0981abd51af_d102669d87efd9797c19fb08202748ad 2024-11-13T20:03:30,589 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-13T20:03:30,594 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-13T20:03:30,607 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-13T20:03:30,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-13T20:03:30,618 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-13T20:03:30,625 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,626 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
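The DeleteTableProcedure above (pid=24) archives the region and MOB store files, removes the table's rows and descriptor from hbase:meta, and clears the ACL znode; the records that follow show the two test snapshots being deleted as well. A sketch of the corresponding client-side teardown calls, with names copied from this log:

```java
// Sketch of the teardown reflected around this point in the log: deleting the
// (already disabled) table, then the two snapshots named in the records below.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupExportTest {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      admin.deleteTable(table);  // the table was disabled earlier in this log
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```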
2024-11-13T20:03:30,626 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528210626"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:30,626 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528210626"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:30,631 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:03:30,631 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d102669d87efd9797c19fb08202748ad, NAME => 'testtb-testExportWithTargetName,,1731528176046.d102669d87efd9797c19fb08202748ad.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 79ca5fb4b138c05dc4584ee433216f59, NAME => 'testtb-testExportWithTargetName,1,1731528176046.79ca5fb4b138c05dc4584ee433216f59.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:03:30,631 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-11-13T20:03:30,632 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528210631"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T20:03:30,639 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-13T20:03:30,643 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T20:03:30,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 150 msec 2024-11-13T20:03:30,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T20:03:30,847 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-13T20:03:30,847 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T20:03:30,866 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-13T20:03:30,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-13T20:03:30,874 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-13T20:03:30,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
snaptb0-testExportWithTargetName 2024-11-13T20:03:30,920 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=757 (was 706) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS_OPEN_REGION-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:58922 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:44133 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:45378 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1359899434_1 at /127.0.0.1:54132 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25264) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1359899434_1 at /127.0.0.1:45338 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1282 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=773 (was 747) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1035 (was 868) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=505 (was 2678) 2024-11-13T20:03:30,921 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=757 is superior to 500 2024-11-13T20:03:30,947 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=757, OpenFileDescriptor=773, MaxFileDescriptor=1048576, SystemLoadAverage=1035, ProcessCount=19, AvailableMemoryMB=504 2024-11-13T20:03:30,947 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=757 is superior to 500 2024-11-13T20:03:30,949 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:03:30,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:30,953 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:03:30,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-13T20:03:30,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T20:03:30,956 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:03:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741899_1075 (size=440) 2024-11-13T20:03:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741899_1075 (size=440) 2024-11-13T20:03:30,979 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 12abba96fe9236990c9b91820b0bfa3e, NAME => 'testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:30,979 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 39f4207c186681deb167cd33afbd9984, NAME => 'testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741899_1075 (size=440) 2024-11-13T20:03:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741900_1076 (size=65) 2024-11-13T20:03:31,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741900_1076 (size=65) 2024-11-13T20:03:31,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741900_1076 (size=65) 2024-11-13T20:03:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741901_1077 (size=65) 2024-11-13T20:03:31,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741901_1077 (size=65) 2024-11-13T20:03:31,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741901_1077 (size=65) 2024-11-13T20:03:31,014 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:31,014 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:31,014 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 39f4207c186681deb167cd33afbd9984, disabling compactions & flushes 2024-11-13T20:03:31,014 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 12abba96fe9236990c9b91820b0bfa3e, disabling compactions & flushes 2024-11-13T20:03:31,015 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 
2024-11-13T20:03:31,015 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. after waiting 0 ms 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. after waiting 0 ms 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,015 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,015 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 
2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 12abba96fe9236990c9b91820b0bfa3e: Waiting for close lock at 1731528211014Disabling compacts and flushes for region at 1731528211014Disabling writes for close at 1731528211015 (+1 ms)Writing region close event to WAL at 1731528211015Closed at 1731528211015 2024-11-13T20:03:31,015 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 39f4207c186681deb167cd33afbd9984: Waiting for close lock at 1731528211014Disabling compacts and flushes for region at 1731528211014Disabling writes for close at 1731528211015 (+1 ms)Writing region close event to WAL at 1731528211015Closed at 1731528211015 2024-11-13T20:03:31,017 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:03:31,018 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528211017"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528211017"}]},"ts":"1731528211017"} 2024-11-13T20:03:31,018 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528211017"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528211017"}]},"ts":"1731528211017"} 2024-11-13T20:03:31,023 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
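The CREATE_TABLE_ADD_TO_META step above writes each region's info:regioninfo and info:state cells plus the table state into hbase:meta. Once the create procedure (pid=25) finishes further below, the same information is readable through the client API; a minimal sketch, assuming an already-open Connection:

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescribeTableSketch {
  // Reads back the descriptor and the two regions recorded in hbase:meta above.
  static void describe(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Admin admin = conn.getAdmin()) {
      TableDescriptor td = admin.getDescriptor(name);
      ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));
      System.out.println("MOB enabled=" + cf.isMobEnabled() + ", threshold=" + cf.getMobThreshold());
      List<RegionInfo> regions = admin.getRegions(name);
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName() + " ["
            + Bytes.toStringBinary(ri.getStartKey()) + ", "
            + Bytes.toStringBinary(ri.getEndKey()) + ")");
      }
    }
  }
}
```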
2024-11-13T20:03:31,024 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:03:31,025 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528211024"}]},"ts":"1731528211024"} 2024-11-13T20:03:31,028 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-13T20:03:31,028 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:03:31,030 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:03:31,030 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:03:31,030 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:03:31,031 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:03:31,031 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:03:31,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, ASSIGN}] 2024-11-13T20:03:31,033 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, ASSIGN 2024-11-13T20:03:31,033 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, ASSIGN 2024-11-13T20:03:31,035 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:03:31,035 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:03:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T20:03:31,185 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T20:03:31,186 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=12abba96fe9236990c9b91820b0bfa3e, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:31,186 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=39f4207c186681deb167cd33afbd9984, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:31,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, ASSIGN because future has completed 2024-11-13T20:03:31,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:03:31,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, ASSIGN because future has completed 2024-11-13T20:03:31,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:03:31,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T20:03:31,354 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,354 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => 12abba96fe9236990c9b91820b0bfa3e, NAME => 'testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:03:31,355 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. service=AccessControlService 2024-11-13T20:03:31,355 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
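The TransitRegionStateProcedure and OpenRegionProcedure entries above move the two regions to OPENING on region servers 54d0784d1943,39533 and 54d0784d1943,33333, after which each server opens its region with the AccessController coprocessor loaded. From a client's point of view, the resulting placement is what a RegionLocator reports; a hedged sketch, again assuming an open Connection:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowRegionPlacement {
  // Prints which region server hosts each region of the table, matching the
  // regionLocation=... values logged by the assignment procedures above.
  static void print(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```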
2024-11-13T20:03:31,355 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,355 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:31,355 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,355 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,357 INFO [StoreOpener-12abba96fe9236990c9b91820b0bfa3e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,359 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,360 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => 39f4207c186681deb167cd33afbd9984, NAME => 'testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:03:31,360 INFO [StoreOpener-12abba96fe9236990c9b91820b0bfa3e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 12abba96fe9236990c9b91820b0bfa3e columnFamilyName cf 2024-11-13T20:03:31,360 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. service=AccessControlService 2024-11-13T20:03:31,360 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:03:31,360 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,360 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:31,361 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,361 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,364 DEBUG [StoreOpener-12abba96fe9236990c9b91820b0bfa3e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:31,364 INFO [StoreOpener-39f4207c186681deb167cd33afbd9984-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,365 INFO [StoreOpener-12abba96fe9236990c9b91820b0bfa3e-1 {}] regionserver.HStore(327): Store=12abba96fe9236990c9b91820b0bfa3e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:31,365 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,366 INFO [StoreOpener-39f4207c186681deb167cd33afbd9984-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 39f4207c186681deb167cd33afbd9984 columnFamilyName cf 2024-11-13T20:03:31,366 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,367 DEBUG [StoreOpener-39f4207c186681deb167cd33afbd9984-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:31,367 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,367 INFO [StoreOpener-39f4207c186681deb167cd33afbd9984-1 {}] regionserver.HStore(327): Store=39f4207c186681deb167cd33afbd9984/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:31,368 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,368 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,368 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,369 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,369 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,370 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,370 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,370 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,372 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,375 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:31,376 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:31,376 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened 
12abba96fe9236990c9b91820b0bfa3e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71060436, jitterRate=0.05888301134109497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:31,376 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,376 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened 39f4207c186681deb167cd33afbd9984; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74232624, jitterRate=0.10615229606628418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:31,376 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,377 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for 12abba96fe9236990c9b91820b0bfa3e: Running coprocessor pre-open hook at 1731528211356Writing region info on filesystem at 1731528211356Initializing all the Stores at 1731528211357 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528211357Cleaning up temporary data from old regions at 1731528211368 (+11 ms)Running coprocessor post-open hooks at 1731528211376 (+8 ms)Region opened successfully at 1731528211377 (+1 ms) 2024-11-13T20:03:31,377 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for 39f4207c186681deb167cd33afbd9984: Running coprocessor pre-open hook at 1731528211361Writing region info on filesystem at 1731528211361Initializing all the Stores at 1731528211364 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528211364Cleaning up temporary data from old regions at 1731528211370 (+6 ms)Running coprocessor post-open hooks at 1731528211376 (+6 ms)Region opened successfully at 1731528211377 (+1 ms) 2024-11-13T20:03:31,378 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984., pid=29, masterSystemTime=1731528211355 2024-11-13T20:03:31,379 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e., pid=28, masterSystemTime=1731528211350 2024-11-13T20:03:31,382 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,382 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,388 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,388 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,388 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=39f4207c186681deb167cd33afbd9984, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:31,388 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=12abba96fe9236990c9b91820b0bfa3e, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:31,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:03:31,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:03:31,401 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=27 2024-11-13T20:03:31,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197 in 197 msec 2024-11-13T20:03:31,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=26 2024-11-13T20:03:31,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273 in 201 msec 2024-11-13T20:03:31,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, ASSIGN in 371 msec 2024-11-13T20:03:31,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-13T20:03:31,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, ASSIGN in 372 msec 2024-11-13T20:03:31,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:03:31,416 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528211416"}]},"ts":"1731528211416"} 2024-11-13T20:03:31,420 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-13T20:03:31,423 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:03:31,423 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-13T20:03:31,429 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T20:03:31,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:31,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:31,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:31,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:31,439 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:31,439 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:31,440 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:31,440 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 
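The PermissionStorage and ZKPermissionWatcher entries above record the ACL "jenkins: RWXCA" for the new table and fan it out to every region server through the /hbase/acl znode. In application code the same grant is normally issued through AccessControlClient; a hedged sketch in which the user name comes from the log and everything else is assumed:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTableAclSketch {
  // Grants READ, WRITE, EXEC, CREATE, ADMIN (the "RWXCA" seen above) on the whole
  // table to user "jenkins"; null family/qualifier means the grant covers everything.
  static void grant(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportWithResetTtl"),
        "jenkins",
        null,   // family
        null,   // qualifier
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```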
2024-11-13T20:03:31,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 490 msec 2024-11-13T20:03:31,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T20:03:31,585 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T20:03:31,585 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-13T20:03:31,589 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,590 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:31,592 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,598 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,605 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,609 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T20:03:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528211609 (current time:1731528211609). 
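The MasterRpcServices(1763) entry above is the incoming snapshot request { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }. On the client side that corresponds to an Admin.snapshot call; a minimal sketch, assuming an open Connection, with the names taken from the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeSnapshotSketch {
  // Requests a FLUSH-type snapshot of the still-empty table, producing the
  // "emptySnaptb0-testExportWithResetTtl" snapshot whose procedure (pid=30) runs below.
  static void snapshot(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}
```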
2024-11-13T20:03:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:03:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T20:03:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:03:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fc147a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:31,612 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@162c6e6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,614 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:31,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fce247b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:31,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:31,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:31,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:31,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:03:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,619 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:03:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b6041bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:31,620 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@731c7031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:31,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,622 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:31,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abcd98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:31,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:31,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:31,625 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57250, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
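The ClusterIdFetcher and ConnectionUtils entries above show the short-lived connections the master opens while validating the snapshot: each first fetches the cluster id ('f805b11d-02b0-4f5c-91d0-d72e768276fd') from the connection registry and then locates hbase:meta on 54d0784d1943,33333. Both facts are also visible to an ordinary client; a hedged sketch, assuming an open Connection:

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowClusterInfoSketch {
  static void print(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Cluster id, the same value the ClusterIdFetcher logs above.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
    // Location of hbase:meta, the same answer ConnectionUtils logs as the fetched meta region location.
    try (RegionLocator metaLocator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation meta = metaLocator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta on " + meta.getServerName());
    }
  }
}
```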
2024-11-13T20:03:31,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:03:31,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:31,628 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52694, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:31,630 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:03:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:31,630 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T20:03:31,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:03:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T20:03:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-13T20:03:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T20:03:31,634 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:03:31,635 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:03:31,638 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:03:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741902_1078 (size=161) 2024-11-13T20:03:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741902_1078 (size=161) 2024-11-13T20:03:31,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741902_1078 (size=161) 2024-11-13T20:03:31,646 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:03:31,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984}] 2024-11-13T20:03:31,648 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,648 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T20:03:31,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-13T20:03:31,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for 12abba96fe9236990c9b91820b0bfa3e: 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for 39f4207c186681deb167cd33afbd9984: 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:03:31,800 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:03:31,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741903_1079 (size=68) 2024-11-13T20:03:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741903_1079 (size=68) 2024-11-13T20:03:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741903_1079 (size=68) 2024-11-13T20:03:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741904_1080 (size=68) 2024-11-13T20:03:31,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741904_1080 (size=68) 2024-11-13T20:03:31,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741904_1080 (size=68) 2024-11-13T20:03:31,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:31,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-13T20:03:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-13T20:03:31,820 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,820 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:31,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 
2024-11-13T20:03:31,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-13T20:03:31,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-13T20:03:31,822 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,822 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:31,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e in 175 msec 2024-11-13T20:03:31,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=30 2024-11-13T20:03:31,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 in 178 msec 2024-11-13T20:03:31,826 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:31,827 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:31,829 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:31,829 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:31,829 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:31,830 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:03:31,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741905_1081 (size=60) 2024-11-13T20:03:31,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741905_1081 (size=60) 2024-11-13T20:03:31,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741905_1081 (size=60) 2024-11-13T20:03:31,839 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:31,840 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:31,840 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:31,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741906_1082 (size=641) 2024-11-13T20:03:31,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741906_1082 (size=641) 2024-11-13T20:03:31,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741906_1082 (size=641) 2024-11-13T20:03:31,872 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:31,882 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:31,883 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:31,885 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:31,885 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-13T20:03:31,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 254 msec 2024-11-13T20:03:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T20:03:31,957 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T20:03:31,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:03:31,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:03:31,977 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-13T20:03:31,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 
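The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries above are what HRegion logs when a batch of mutations arrives with write-ahead logging turned off. A minimal client-side sketch that would trigger this kind of warning (row key, qualifier and value are illustrative, not taken from the test):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml for the target cluster is on the classpath.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // skip the WAL: faster, but unflushed data is lost on a crash
      table.put(put);
    }
  }
}
```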
2024-11-13T20:03:31,984 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:31,987 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:31,999 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:32,018 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:32,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T20:03:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528212023 (current time:1731528212023). 2024-11-13T20:03:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:03:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T20:03:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:03:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a4c0559, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:32,029 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:32,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:32,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:32,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee415da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
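The "snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }" entry above is the master-side view of a client snapshot call; creation time, version and owner are then filled in by SnapshotDescriptionUtils as logged. A minimal sketch of the corresponding client request (only the snapshot and table names come from the log; connection setup is assumed):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a FLUSH-type snapshot,
      // matching "type=FLUSH" in the request logged above.
      admin.snapshot("snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}
```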
2024-11-13T20:03:32,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:32,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:32,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,034 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:32,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7042a1fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:32,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:32,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:32,046 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39942, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:32,048 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:03:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,049 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e9f2a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:32,056 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:32,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:32,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:32,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fc16d49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:32,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:32,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:32,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,058 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:32,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@381a4362, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:32,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:32,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:32,069 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:32,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:03:32,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:32,073 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:32,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:03:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:32,075 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T20:03:32,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
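The stack trace and the "Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]" entry above show the master reading the table's permissions so they can be written into the snapshot description. For reference, a client can inspect the same ACLs through AccessControlClient (a sketch; it assumes the AccessController coprocessor is enabled, as the hbase:acl reads above suggest):

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadTableAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Table-name regex; here it matches only the table from the log.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl");
      // Expect something like "jenkins: RWXCA" (read/write/exec/create/admin).
      perms.forEach(System.out::println);
    }
  }
}
```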
2024-11-13T20:03:32,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T20:03:32,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-13T20:03:32,081 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:03:32,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:32,084 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:03:32,087 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:03:32,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741907_1083 (size=156) 2024-11-13T20:03:32,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741907_1083 (size=156) 2024-11-13T20:03:32,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741907_1083 (size=156) 2024-11-13T20:03:32,119 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:03:32,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984}] 2024-11-13T20:03:32,121 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:32,121 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:32,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:32,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-13T20:03:32,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:32,274 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing 39f4207c186681deb167cd33afbd9984 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-13T20:03:32,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-13T20:03:32,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:32,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing 12abba96fe9236990c9b91820b0bfa3e 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-13T20:03:32,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e is 71, key is 0248d728f909378ae80867cdf93cf839/cf:q/1731528211970/Put/seqid=0 2024-11-13T20:03:32,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 is 71, key is 1ad23bc85ffefdfb201db3901de4cf59/cf:q/1731528211972/Put/seqid=0 2024-11-13T20:03:32,359 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-13T20:03:32,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741908_1084 (size=5032) 2024-11-13T20:03:32,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741908_1084 (size=5032) 2024-11-13T20:03:32,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741908_1084 (size=5032) 2024-11-13T20:03:32,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741909_1085 (size=8242) 2024-11-13T20:03:32,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741909_1085 
(size=8242) 2024-11-13T20:03:32,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741909_1085 (size=8242) 2024-11-13T20:03:32,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:32,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:32,418 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:32,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/.tmp/cf/10d7912102ed4c299ac30146b50b0007, store: [table=testtb-testExportWithResetTtl family=cf region=39f4207c186681deb167cd33afbd9984] 2024-11-13T20:03:32,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/.tmp/cf/10d7912102ed4c299ac30146b50b0007 is 206, key is 170d838608a806dfb5f4273dfe8fca350/cf:q/1731528211972/Put/seqid=0 2024-11-13T20:03:32,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741910_1086 (size=15057) 2024-11-13T20:03:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741910_1086 (size=15057) 2024-11-13T20:03:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741910_1086 (size=15057) 2024-11-13T20:03:32,434 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/.tmp/cf/10d7912102ed4c299ac30146b50b0007 2024-11-13T20:03:32,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/.tmp/cf/10d7912102ed4c299ac30146b50b0007 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007 2024-11-13T20:03:32,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007, entries=48, sequenceid=6, filesize=14.7 K 2024-11-13T20:03:32,506 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 39f4207c186681deb167cd33afbd9984 in 232ms, sequenceid=6, compaction requested=false 2024-11-13T20:03:32,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for 39f4207c186681deb167cd33afbd9984: 2024-11-13T20:03:32,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. for snaptb0-testExportWithResetTtl completed. 2024-11-13T20:03:32,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T20:03:32,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:32,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007] hfiles 2024-11-13T20:03:32,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007 for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T20:03:32,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741911_1087 (size=107) 2024-11-13T20:03:32,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741911_1087 (size=107) 2024-11-13T20:03:32,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741911_1087 (size=107) 2024-11-13T20:03:32,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 
2024-11-13T20:03:32,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-13T20:03:32,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-13T20:03:32,593 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:32,594 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:32,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39f4207c186681deb167cd33afbd9984 in 478 msec 2024-11-13T20:03:32,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:32,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:32,768 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:32,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/.tmp/cf/3a729c3b994841309f7ef9de11de59df, store: [table=testtb-testExportWithResetTtl family=cf region=12abba96fe9236990c9b91820b0bfa3e] 2024-11-13T20:03:32,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/.tmp/cf/3a729c3b994841309f7ef9de11de59df is 206, key is 04414c03cbdfbdb7dff6fbf0a4296300e/cf:q/1731528211970/Put/seqid=0 2024-11-13T20:03:32,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741912_1088 (size=5700) 2024-11-13T20:03:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741912_1088 (size=5700) 2024-11-13T20:03:32,782 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/.tmp/cf/3a729c3b994841309f7ef9de11de59df 2024-11-13T20:03:32,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741912_1088 (size=5700) 2024-11-13T20:03:32,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/.tmp/cf/3a729c3b994841309f7ef9de11de59df as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df 2024-11-13T20:03:32,801 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df, entries=2, sequenceid=6, filesize=5.6 K 2024-11-13T20:03:32,805 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 12abba96fe9236990c9b91820b0bfa3e in 530ms, sequenceid=6, compaction requested=false 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for 12abba96fe9236990c9b91820b0bfa3e: 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. for snaptb0-testExportWithResetTtl completed. 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df] hfiles 2024-11-13T20:03:32,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T20:03:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741913_1089 (size=107) 2024-11-13T20:03:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741913_1089 (size=107) 2024-11-13T20:03:32,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741913_1089 (size=107) 2024-11-13T20:03:32,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 
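At this point both regions of testtb-testExportWithResetTtl have stored their region info and hfile references for snaptb0-testExportWithResetTtl. Once the parent SnapshotProcedure (pid=33) finishes a few lines below, the snapshot is visible to clients; a quick way to confirm that (a sketch, not part of the test itself):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Should include emptySnaptb0-testExportWithResetTtl and snaptb0-testExportWithResetTtl.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}
```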
2024-11-13T20:03:32,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-13T20:03:32,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-13T20:03:32,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:32,844 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:32,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-13T20:03:32,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12abba96fe9236990c9b91820b0bfa3e in 727 msec 2024-11-13T20:03:32,849 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:32,850 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:32,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:32,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:32,851 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:32,853 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e] hfiles 2024-11-13T20:03:32,853 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:32,853 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:32,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741914_1090 (size=291) 2024-11-13T20:03:32,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741914_1090 (size=291) 2024-11-13T20:03:32,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741914_1090 (size=291) 2024-11-13T20:03:33,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:33,271 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:33,271 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-13T20:03:33,272 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-13T20:03:33,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741915_1091 (size=951) 2024-11-13T20:03:33,289 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741915_1091 (size=951) 2024-11-13T20:03:33,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741915_1091 (size=951) 2024-11-13T20:03:33,295 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:33,302 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:33,303 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-13T20:03:33,305 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:33,305 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-13T20:03:33,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.2270 sec 2024-11-13T20:03:34,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T20:03:34,225 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T20:03:34,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:03:34,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 
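The create request above ("create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => ...}, {NAME => 'cf', ... IS_MOB => 'true', MOB_THRESHOLD => '0', ...}") spells out the descriptor for the new table: a single MOB-enabled family cf and, as the two regions created below show, one split point at '1'. A rough client-side equivalent built from those logged attributes (other attributes are left at their defaults; this is a sketch, not the test's own code):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0': values are written out as MOB files
              .setMaxVersions(1)     // VERSIONS => '1'
              .build())
          .build();
      // One split key gives the two regions seen in the log: ('' -> '1') and ('1' -> '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```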
2024-11-13T20:03:34,231 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:03:34,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-13T20:03:34,232 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:03:34,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T20:03:34,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741916_1092 (size=433) 2024-11-13T20:03:34,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741916_1092 (size=433) 2024-11-13T20:03:34,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741916_1092 (size=433) 2024-11-13T20:03:34,308 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6907fcc40f0202bda334fd38c624294a, NAME => 'testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:34,320 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ff46acbd2fe1acdeb7bcecd7dc73b3b8, NAME => 'testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:34,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T20:03:34,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741917_1093 (size=58) 2024-11-13T20:03:34,353 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741917_1093 (size=58) 2024-11-13T20:03:34,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741917_1093 (size=58) 2024-11-13T20:03:34,354 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:34,355 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 6907fcc40f0202bda334fd38c624294a, disabling compactions & flushes 2024-11-13T20:03:34,355 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,355 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,355 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. after waiting 0 ms 2024-11-13T20:03:34,355 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,355 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,355 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6907fcc40f0202bda334fd38c624294a: Waiting for close lock at 1731528214355Disabling compacts and flushes for region at 1731528214355Disabling writes for close at 1731528214355Writing region close event to WAL at 1731528214355Closed at 1731528214355 2024-11-13T20:03:34,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741918_1094 (size=58) 2024-11-13T20:03:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741918_1094 (size=58) 2024-11-13T20:03:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741918_1094 (size=58) 2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ff46acbd2fe1acdeb7bcecd7dc73b3b8, disabling compactions & flushes 2024-11-13T20:03:34,374 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 
2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. after waiting 0 ms 2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,374 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,374 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ff46acbd2fe1acdeb7bcecd7dc73b3b8: Waiting for close lock at 1731528214374Disabling compacts and flushes for region at 1731528214374Disabling writes for close at 1731528214374Writing region close event to WAL at 1731528214374Closed at 1731528214374 2024-11-13T20:03:34,376 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:03:34,377 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731528214376"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528214376"}]},"ts":"1731528214376"} 2024-11-13T20:03:34,377 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731528214376"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528214376"}]},"ts":"1731528214376"} 2024-11-13T20:03:34,383 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
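(Aside, not part of the captured log: the two Put entries just above are how CreateTableProcedure registers the new regions in hbase:meta, one row per region carrying info:regioninfo and info:state. A minimal client-side sketch that reads those rows back might look like the following; it assumes a reachable cluster and the HBase 2.x Java client, and the class name, the prefix scan, and the printed columns are illustrative choices rather than anything this test itself runs.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRegionRowsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         // hbase:meta row keys start with "<table>,<startkey>,<timestamp>.<encoded-name>."
         ResultScanner scanner = meta.getScanner(
             new Scan().setRowPrefixFilter(Bytes.toBytes("testExportWithResetTtl,"))
                       .addFamily(Bytes.toBytes("info")))) {
      for (Result r : scanner) {
        byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(Bytes.toStringBinary(r.getRow())
            + " info:state=" + (state == null ? "?" : Bytes.toString(state)));
      }
    }
  }
}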
2024-11-13T20:03:34,387 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:03:34,388 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528214387"}]},"ts":"1731528214387"} 2024-11-13T20:03:34,394 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-13T20:03:34,395 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:03:34,397 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:03:34,397 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:03:34,397 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:03:34,397 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:03:34,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, ASSIGN}] 2024-11-13T20:03:34,401 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, ASSIGN 2024-11-13T20:03:34,401 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, ASSIGN 2024-11-13T20:03:34,403 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:03:34,403 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:03:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T20:03:34,554 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T20:03:34,554 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=6907fcc40f0202bda334fd38c624294a, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:03:34,554 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=ff46acbd2fe1acdeb7bcecd7dc73b3b8, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:34,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, ASSIGN because future has completed 2024-11-13T20:03:34,559 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:03:34,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, ASSIGN because future has completed 2024-11-13T20:03:34,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:03:34,719 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,719 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => 6907fcc40f0202bda334fd38c624294a, NAME => 'testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:03:34,719 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. service=AccessControlService 2024-11-13T20:03:34,719 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:03:34,720 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,720 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:34,720 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,720 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,722 INFO [StoreOpener-6907fcc40f0202bda334fd38c624294a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,723 INFO [StoreOpener-6907fcc40f0202bda334fd38c624294a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6907fcc40f0202bda334fd38c624294a columnFamilyName cf 2024-11-13T20:03:34,724 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,724 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => ff46acbd2fe1acdeb7bcecd7dc73b3b8, NAME => 'testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:03:34,724 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. service=AccessControlService 2024-11-13T20:03:34,724 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:03:34,724 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,724 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:34,725 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,725 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,725 DEBUG [StoreOpener-6907fcc40f0202bda334fd38c624294a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:34,726 INFO [StoreOpener-6907fcc40f0202bda334fd38c624294a-1 {}] regionserver.HStore(327): Store=6907fcc40f0202bda334fd38c624294a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:34,726 INFO [StoreOpener-ff46acbd2fe1acdeb7bcecd7dc73b3b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,726 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,727 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,728 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,728 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,728 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,728 INFO [StoreOpener-ff46acbd2fe1acdeb7bcecd7dc73b3b8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff46acbd2fe1acdeb7bcecd7dc73b3b8 columnFamilyName cf 2024-11-13T20:03:34,729 DEBUG [StoreOpener-ff46acbd2fe1acdeb7bcecd7dc73b3b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:34,730 INFO [StoreOpener-ff46acbd2fe1acdeb7bcecd7dc73b3b8-1 {}] regionserver.HStore(327): Store=ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:34,730 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,731 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,731 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,731 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,732 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,732 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,733 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:34,734 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,734 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened 6907fcc40f0202bda334fd38c624294a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64899180, jitterRate=-0.032926857471466064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:34,734 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:34,735 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for 6907fcc40f0202bda334fd38c624294a: Running coprocessor pre-open hook at 1731528214720Writing region info on filesystem at 1731528214720Initializing all the Stores at 1731528214721 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528214721Cleaning up temporary data from old regions at 1731528214728 (+7 ms)Running coprocessor post-open hooks at 1731528214734 (+6 ms)Region opened successfully at 1731528214735 (+1 ms) 2024-11-13T20:03:34,736 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a., pid=39, masterSystemTime=1731528214715 2024-11-13T20:03:34,736 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:34,737 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened ff46acbd2fe1acdeb7bcecd7dc73b3b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62498940, jitterRate=-0.06869322061538696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:34,737 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:34,737 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for ff46acbd2fe1acdeb7bcecd7dc73b3b8: Running coprocessor pre-open hook at 1731528214725Writing region info on filesystem at 1731528214725Initializing all the Stores at 1731528214726 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528214726Cleaning up temporary data from old regions at 1731528214732 (+6 ms)Running coprocessor post-open hooks at 1731528214737 (+5 ms)Region opened successfully at 1731528214737 2024-11-13T20:03:34,738 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8., pid=40, masterSystemTime=1731528214720 2024-11-13T20:03:34,740 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,740 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,740 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=6907fcc40f0202bda334fd38c624294a, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:03:34,741 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,741 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:34,742 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=ff46acbd2fe1acdeb7bcecd7dc73b3b8, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:34,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:03:34,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:03:34,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-13T20:03:34,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-13T20:03:34,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987 in 187 msec 2024-11-13T20:03:34,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197 in 183 msec 2024-11-13T20:03:34,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, ASSIGN in 350 msec 2024-11-13T20:03:34,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=36 2024-11-13T20:03:34,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, ASSIGN in 351 msec 2024-11-13T20:03:34,753 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, 
state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:03:34,754 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528214754"}]},"ts":"1731528214754"} 2024-11-13T20:03:34,756 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-13T20:03:34,757 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:03:34,757 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-13T20:03:34,762 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T20:03:34,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:34,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:34,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:34,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:34,827 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,827 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 
\x01 \x02 \x03 \x04 2024-11-13T20:03:34,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:34,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 597 msec 2024-11-13T20:03:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T20:03:34,858 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-13T20:03:34,859 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,863 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-13T20:03:34,863 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,863 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:34,866 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,875 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,891 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:03:34,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. with WAL disabled. Data may be lost in the event of a crash. 
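(Aside, not shown in the log: the pid=36 trace above corresponds to creating a table with a single MOB-enabled family 'cf' (MOB_THRESHOLD=0, VERSIONS=1) and one split point at '1', after which the test writes rows with the WAL disabled. A hedged sketch of issuing the equivalent requests through the HBase 2.x Admin/Table API follows; the class name, row key, and value are placeholders, not the test's actual data.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One family 'cf', MOB enabled with threshold 0, so every cell is written
      // as a MOB reference (which is why the snapshot flushes later land under /mobdir).
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // A single split point '1' yields the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(tdb.build(), new byte[][] { Bytes.toBytes("1") });

      try (Table table = conn.getTable(tn)) {
        Put p = new Put(Bytes.toBytes("row-0001"));            // placeholder row key
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        p.setDurability(Durability.SKIP_WAL);                  // mirrors "WAL disabled" in the log
        table.put(p);
      }
    }
  }
}

Durability.SKIP_WAL mirrors the "writing data to region ... with WAL disabled" entries above: it trades crash safety for speed, which the log itself warns about and which is acceptable here because the data is snapshotted immediately afterwards.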
2024-11-13T20:03:34,912 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,916 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-13T20:03:34,916 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:34,916 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:34,919 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,926 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,934 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T20:03:34,942 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-13T20:03:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528214942 (current time:1731528214942). 
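(Aside, not part of the log: the request above asks for a FLUSH-type snapshot named snaptb-testExportWithResetTtl with ttl=100000. A minimal sketch of issuing such a request from a client is below; it assumes a TTL-aware Admin API (roughly HBase 2.3+), and both the "TTL" property key and the snapshot(...) overload used here are assumptions based on that API rather than anything visible in the log.)

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotWithTtlSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // TTL travels as a snapshot property; the log shows ttl=100000 on the
      // resulting SnapshotDescription handled by SnapshotProcedure pid=41.
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", 100000L);
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH,
          props);
    }
  }
}

The shell form is typically snapshot 'testExportWithResetTtl', 'snaptb-testExportWithResetTtl', {TTL => 100000}.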
2024-11-13T20:03:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T20:03:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:03:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ccab28a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:34,948 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:34,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:34,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:34,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734ee1bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:34,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:34,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,950 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:34,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@92c0f87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:34,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:34,954 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:34,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:34,957 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:03:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,957 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:03:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18618835, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:34,960 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:34,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:34,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:34,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4054d881, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:34,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:34,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,962 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:34,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63881cc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:34,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:34,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:34,967 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0001_000001 (auth:SIMPLE) from 127.0.0.1:57914 2024-11-13T20:03:34,975 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:34,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:03:34,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:34,980 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:34,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:03:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:34,983 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:34,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T20:03:34,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:03:34,989 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000001/launch_container.sh] 2024-11-13T20:03:34,990 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000001/container_tokens] 2024-11-13T20:03:34,990 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0001/container_1731528172678_0001_01_000001/sysfs] 2024-11-13T20:03:34,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-13T20:03:34,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-13T20:03:34,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T20:03:34,994 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:03:34,996 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:03:35,008 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:03:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741919_1095 (size=143) 2024-11-13T20:03:35,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741919_1095 (size=143) 2024-11-13T20:03:35,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741919_1095 (size=143) 2024-11-13T20:03:35,038 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:03:35,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6907fcc40f0202bda334fd38c624294a}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8}] 2024-11-13T20:03:35,041 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:35,041 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T20:03:35,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-13T20:03:35,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-13T20:03:35,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:35,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 
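(Aside: pids 42 and 43 dispatch SnapshotRegionCallable to the two region servers, which flush each region before the snapshot manifest is written. Once pid=41 finishes, the snapshot and its TTL are visible to clients; the sketch below lists it and reads the TTL back. It assumes the Admin.listSnapshots(Pattern) overload and a client-side SnapshotDescription exposing getTtl(), both from TTL-aware 2.x clients, and is illustrative rather than something this test executes.)

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotTtlSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both snapshots taken in this test start with "snaptb"; the second one
      // should report the non-zero TTL requested above.
      List<SnapshotDescription> snaps =
          admin.listSnapshots(Pattern.compile("snaptb.*testExportWithResetTtl"));
      for (SnapshotDescription s : snaps) {
        System.out.println(s.getName() + " table=" + s.getTableName()
            + " ttl=" + s.getTtl());   // getTtl() assumed present on TTL-aware clients
      }
    }
  }
}

In this test the snapshot is subsequently exported with the ExportSnapshot MapReduce tool (the MiniMRCluster and ContainersLauncher entries elsewhere in the log come from that machinery), which is where resetting the TTL on the exported copy comes into play.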
2024-11-13T20:03:35,195 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 6907fcc40f0202bda334fd38c624294a 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-13T20:03:35,195 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing ff46acbd2fe1acdeb7bcecd7dc73b3b8 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-13T20:03:35,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a is 71, key is 004116fb8bdc492c5a0974809af11037/cf:q/1731528214906/Put/seqid=0 2024-11-13T20:03:35,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 is 71, key is 1064880014203484f2b3ad9119b81c92/cf:q/1731528214909/Put/seqid=0 2024-11-13T20:03:35,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741920_1096 (size=5242) 2024-11-13T20:03:35,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741920_1096 (size=5242) 2024-11-13T20:03:35,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741921_1097 (size=8031) 2024-11-13T20:03:35,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741920_1096 (size=5242) 2024-11-13T20:03:35,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741921_1097 (size=8031) 2024-11-13T20:03:35,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:35,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:35,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741921_1097 (size=8031) 2024-11-13T20:03:35,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a to 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:35,235 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:35,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/.tmp/cf/58a70592d94e49978edcfe0b5d10ddde, store: [table=testExportWithResetTtl family=cf region=6907fcc40f0202bda334fd38c624294a] 2024-11-13T20:03:35,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/.tmp/cf/9e60d098b4814f8599bb8c79ec7d2650, store: [table=testExportWithResetTtl family=cf region=ff46acbd2fe1acdeb7bcecd7dc73b3b8] 2024-11-13T20:03:35,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/.tmp/cf/9e60d098b4814f8599bb8c79ec7d2650 is 199, key is 19638799a9bf26e12f73872fa5d5d4d65/cf:q/1731528214909/Put/seqid=0 2024-11-13T20:03:35,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/.tmp/cf/58a70592d94e49978edcfe0b5d10ddde is 199, key is 0dcd5738ad0d788cb1dd88f0aecc51f7f/cf:q/1731528214906/Put/seqid=0 2024-11-13T20:03:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741922_1098 (size=14127) 2024-11-13T20:03:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741922_1098 (size=14127) 2024-11-13T20:03:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741922_1098 (size=14127) 2024-11-13T20:03:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741923_1099 (size=6268) 2024-11-13T20:03:35,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741923_1099 (size=6268) 2024-11-13T20:03:35,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741923_1099 (size=6268) 2024-11-13T20:03:35,249 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/.tmp/cf/9e60d098b4814f8599bb8c79ec7d2650 2024-11-13T20:03:35,249 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/.tmp/cf/58a70592d94e49978edcfe0b5d10ddde 2024-11-13T20:03:35,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/.tmp/cf/9e60d098b4814f8599bb8c79ec7d2650 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650 2024-11-13T20:03:35,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/.tmp/cf/58a70592d94e49978edcfe0b5d10ddde as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde 2024-11-13T20:03:35,277 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650, entries=45, sequenceid=5, filesize=13.8 K 2024-11-13T20:03:35,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for ff46acbd2fe1acdeb7bcecd7dc73b3b8 in 84ms, sequenceid=5, compaction requested=false 2024-11-13T20:03:35,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for ff46acbd2fe1acdeb7bcecd7dc73b3b8: 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] 
regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. for snaptb-testExportWithResetTtl completed. 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650] hfiles 2024-11-13T20:03:35,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650 for snapshot=snaptb-testExportWithResetTtl 2024-11-13T20:03:35,287 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde, entries=5, sequenceid=5, filesize=6.1 K 2024-11-13T20:03:35,289 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 6907fcc40f0202bda334fd38c624294a in 94ms, sequenceid=5, compaction requested=false 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 6907fcc40f0202bda334fd38c624294a: 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. for snaptb-testExportWithResetTtl completed. 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde] hfiles 2024-11-13T20:03:35,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde for snapshot=snaptb-testExportWithResetTtl 2024-11-13T20:03:35,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741924_1100 (size=100) 2024-11-13T20:03:35,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741924_1100 (size=100) 2024-11-13T20:03:35,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741924_1100 (size=100) 2024-11-13T20:03:35,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 
2024-11-13T20:03:35,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-13T20:03:35,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-13T20:03:35,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:35,306 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:35,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8 in 268 msec 2024-11-13T20:03:35,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741925_1101 (size=100) 2024-11-13T20:03:35,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T20:03:35,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741925_1101 (size=100) 2024-11-13T20:03:35,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741925_1101 (size=100) 2024-11-13T20:03:35,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 
2024-11-13T20:03:35,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-13T20:03:35,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-13T20:03:35,319 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:35,319 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:35,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-13T20:03:35,325 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:35,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6907fcc40f0202bda334fd38c624294a in 281 msec 2024-11-13T20:03:35,326 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:35,328 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:35,328 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:35,328 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:35,330 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a] hfiles 2024-11-13T20:03:35,331 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:35,331 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:35,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741926_1102 (size=284) 2024-11-13T20:03:35,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741926_1102 (size=284) 2024-11-13T20:03:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741926_1102 (size=284) 2024-11-13T20:03:35,379 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:35,379 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-13T20:03:35,380 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T20:03:35,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741927_1103 (size=923) 2024-11-13T20:03:35,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741927_1103 (size=923) 2024-11-13T20:03:35,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42549 is added to blk_1073741927_1103 (size=923) 2024-11-13T20:03:35,464 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:35,477 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:35,478 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T20:03:35,480 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:35,480 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-13T20:03:35,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 495 msec 2024-11-13T20:03:35,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T20:03:35,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-13T20:03:35,643 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643 2024-11-13T20:03:35,644 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:35,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:35,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T20:03:35,713 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:03:35,720 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T20:03:35,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741928_1104 (size=143) 2024-11-13T20:03:35,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741928_1104 (size=143) 2024-11-13T20:03:35,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741928_1104 (size=143) 2024-11-13T20:03:35,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741929_1105 (size=923) 2024-11-13T20:03:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741929_1105 (size=923) 2024-11-13T20:03:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741929_1105 (size=923) 2024-11-13T20:03:35,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741930_1106 (size=141) 2024-11-13T20:03:35,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741930_1106 (size=141) 2024-11-13T20:03:35,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741930_1106 (size=141) 2024-11-13T20:03:35,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:35,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:35,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:35,928 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:03:37,149 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-5352323650415006928.jar 2024-11-13T20:03:37,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-11652257980044515481.jar 2024-11-13T20:03:37,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:37,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:03:37,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:03:37,242 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:03:37,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:03:37,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:03:37,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:03:37,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:03:37,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:03:37,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:03:37,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:03:37,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:03:37,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:37,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:37,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:37,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:37,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:03:37,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:37,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:03:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741931_1107 (size=131440) 2024-11-13T20:03:37,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741931_1107 (size=131440) 2024-11-13T20:03:37,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741931_1107 (size=131440) 2024-11-13T20:03:37,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T20:03:37,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T20:03:37,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T20:03:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741933_1109 (size=1323991) 2024-11-13T20:03:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741933_1109 (size=1323991) 2024-11-13T20:03:37,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741933_1109 (size=1323991) 2024-11-13T20:03:37,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741934_1110 (size=440392) 2024-11-13T20:03:37,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741934_1110 (size=440392) 2024-11-13T20:03:37,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741934_1110 (size=440392) 
2024-11-13T20:03:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741935_1111 (size=903730) 2024-11-13T20:03:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741935_1111 (size=903730) 2024-11-13T20:03:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741935_1111 (size=903730) 2024-11-13T20:03:38,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T20:03:38,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T20:03:38,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T20:03:38,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T20:03:38,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T20:03:38,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T20:03:38,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741938_1114 (size=77835) 2024-11-13T20:03:38,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741938_1114 (size=77835) 2024-11-13T20:03:38,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741938_1114 (size=77835) 2024-11-13T20:03:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741939_1115 (size=30949) 2024-11-13T20:03:38,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741939_1115 (size=30949) 2024-11-13T20:03:38,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741939_1115 (size=30949) 2024-11-13T20:03:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741940_1116 (size=1597327) 2024-11-13T20:03:38,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741940_1116 (size=1597327) 2024-11-13T20:03:38,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741940_1116 (size=1597327) 2024-11-13T20:03:38,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741941_1117 (size=4695811) 2024-11-13T20:03:38,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741941_1117 
(size=4695811) 2024-11-13T20:03:38,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741941_1117 (size=4695811) 2024-11-13T20:03:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741942_1118 (size=232957) 2024-11-13T20:03:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741942_1118 (size=232957) 2024-11-13T20:03:38,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741942_1118 (size=232957) 2024-11-13T20:03:38,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741943_1119 (size=127628) 2024-11-13T20:03:38,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741943_1119 (size=127628) 2024-11-13T20:03:38,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741943_1119 (size=127628) 2024-11-13T20:03:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741944_1120 (size=20406) 2024-11-13T20:03:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741944_1120 (size=20406) 2024-11-13T20:03:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741944_1120 (size=20406) 2024-11-13T20:03:38,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T20:03:38,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T20:03:38,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T20:03:38,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741946_1122 (size=217634) 2024-11-13T20:03:38,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741946_1122 (size=217634) 2024-11-13T20:03:38,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741946_1122 (size=217634) 2024-11-13T20:03:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741947_1123 (size=1832290) 2024-11-13T20:03:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741947_1123 (size=1832290) 2024-11-13T20:03:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741947_1123 (size=1832290) 2024-11-13T20:03:38,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to 
blk_1073741948_1124 (size=322274) 2024-11-13T20:03:38,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741948_1124 (size=322274) 2024-11-13T20:03:38,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741948_1124 (size=322274) 2024-11-13T20:03:38,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741949_1125 (size=503880) 2024-11-13T20:03:38,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741949_1125 (size=503880) 2024-11-13T20:03:38,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741949_1125 (size=503880) 2024-11-13T20:03:38,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741950_1126 (size=6424542) 2024-11-13T20:03:38,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741950_1126 (size=6424542) 2024-11-13T20:03:38,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741950_1126 (size=6424542) 2024-11-13T20:03:38,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741951_1127 (size=29229) 2024-11-13T20:03:38,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741951_1127 (size=29229) 2024-11-13T20:03:38,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741951_1127 (size=29229) 2024-11-13T20:03:38,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741952_1128 (size=24096) 2024-11-13T20:03:38,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741952_1128 (size=24096) 2024-11-13T20:03:38,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741952_1128 (size=24096) 2024-11-13T20:03:38,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741953_1129 (size=111872) 2024-11-13T20:03:38,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741953_1129 (size=111872) 2024-11-13T20:03:38,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741953_1129 (size=111872) 2024-11-13T20:03:38,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741954_1130 (size=45609) 2024-11-13T20:03:38,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741954_1130 (size=45609) 2024-11-13T20:03:38,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to 
blk_1073741954_1130 (size=45609) 2024-11-13T20:03:38,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741955_1131 (size=136454) 2024-11-13T20:03:38,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741955_1131 (size=136454) 2024-11-13T20:03:38,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741955_1131 (size=136454) 2024-11-13T20:03:38,837 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:03:38,845 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-13T20:03:38,849 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:03:38,856 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=32.9 K 2024-11-13T20:03:38,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741956_1132 (size=686) 2024-11-13T20:03:38,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741956_1132 (size=686) 2024-11-13T20:03:38,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741956_1132 (size=686) 2024-11-13T20:03:38,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741957_1133 (size=15) 2024-11-13T20:03:38,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741957_1133 (size=15) 2024-11-13T20:03:38,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741957_1133 (size=15) 2024-11-13T20:03:38,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741958_1134 (size=303724) 2024-11-13T20:03:38,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741958_1134 (size=303724) 2024-11-13T20:03:38,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741958_1134 (size=303724) 2024-11-13T20:03:39,031 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:03:39,031 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:03:39,098 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0002_000001 (auth:SIMPLE) from 127.0.0.1:42336 2024-11-13T20:03:40,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-13T20:03:40,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-13T20:03:40,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-13T20:03:40,609 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-13T20:03:44,752 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ff46acbd2fe1acdeb7bcecd7dc73b3b8 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:03:44,757 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 39f4207c186681deb167cd33afbd9984 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:03:44,764 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f7446bb3f02b60d610e520d40f8cc773 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:03:44,764 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6907fcc40f0202bda334fd38c624294a changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:03:44,765 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 12abba96fe9236990c9b91820b0bfa3e changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:03:46,159 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0002_000001 (auth:SIMPLE) from 127.0.0.1:60718 2024-11-13T20:03:46,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741959_1135 (size=349374) 2024-11-13T20:03:46,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741959_1135 (size=349374) 2024-11-13T20:03:46,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741959_1135 (size=349374) 2024-11-13T20:03:48,416 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0002_000001 (auth:SIMPLE) from 127.0.0.1:37056 2024-11-13T20:03:53,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741960_1136 (size=14127) 2024-11-13T20:03:53,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741960_1136 (size=14127) 2024-11-13T20:03:53,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38925 is added to blk_1073741960_1136 (size=14127) 2024-11-13T20:03:53,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741961_1137 (size=8031) 2024-11-13T20:03:53,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741961_1137 (size=8031) 2024-11-13T20:03:53,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741961_1137 (size=8031) 2024-11-13T20:03:54,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741962_1138 (size=6268) 2024-11-13T20:03:54,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741962_1138 (size=6268) 2024-11-13T20:03:54,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741962_1138 (size=6268) 2024-11-13T20:03:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741963_1139 (size=5242) 2024-11-13T20:03:54,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741963_1139 (size=5242) 2024-11-13T20:03:54,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741963_1139 (size=5242) 2024-11-13T20:03:54,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741964_1140 (size=17458) 2024-11-13T20:03:54,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741964_1140 (size=17458) 2024-11-13T20:03:54,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741964_1140 (size=17458) 2024-11-13T20:03:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741965_1141 (size=461) 2024-11-13T20:03:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741965_1141 (size=461) 2024-11-13T20:03:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741965_1141 (size=461) 2024-11-13T20:03:54,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000002/launch_container.sh] 2024-11-13T20:03:54,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000002/container_tokens] 2024-11-13T20:03:54,580 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000002/sysfs] 2024-11-13T20:03:55,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741966_1142 (size=17458) 2024-11-13T20:03:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741966_1142 (size=17458) 2024-11-13T20:03:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741966_1142 (size=17458) 2024-11-13T20:03:55,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741967_1143 (size=349374) 2024-11-13T20:03:55,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741967_1143 (size=349374) 2024-11-13T20:03:55,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741967_1143 (size=349374) 2024-11-13T20:03:55,103 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0002_000001 (auth:SIMPLE) from 127.0.0.1:43328 2024-11-13T20:03:56,244 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:03:56,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-13T20:03:56,267 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-13T20:03:56,267 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:03:56,268 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:03:56,268 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T20:03:56,268 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-13T20:03:56,268 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-13T20:03:56,268 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T20:03:56,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-13T20:03:56,269 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528215643/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-13T20:03:56,278 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-13T20:03:56,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T20:03:56,282 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528236281"}]},"ts":"1731528236281"} 2024-11-13T20:03:56,285 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-13T20:03:56,285 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-13T20:03:56,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-13T20:03:56,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, UNASSIGN}] 2024-11-13T20:03:56,289 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, UNASSIGN 2024-11-13T20:03:56,289 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, UNASSIGN 2024-11-13T20:03:56,291 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=6907fcc40f0202bda334fd38c624294a, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:03:56,291 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ff46acbd2fe1acdeb7bcecd7dc73b3b8, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:56,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, UNASSIGN because future has completed 2024-11-13T20:03:56,293 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:56,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:03:56,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, UNASSIGN because future has completed 2024-11-13T20:03:56,298 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:56,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:03:56,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T20:03:56,446 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:56,447 DEBUG 
[RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:56,447 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing ff46acbd2fe1acdeb7bcecd7dc73b3b8, disabling compactions & flushes 2024-11-13T20:03:56,447 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:56,447 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:56,447 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. after waiting 0 ms 2024-11-13T20:03:56,447 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:56,450 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:56,450 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:56,451 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 6907fcc40f0202bda334fd38c624294a, disabling compactions & flushes 2024-11-13T20:03:56,451 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:56,451 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 2024-11-13T20:03:56,451 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. after waiting 0 ms 2024-11-13T20:03:56,451 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 
2024-11-13T20:03:56,453 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:03:56,454 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:56,454 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8. 2024-11-13T20:03:56,454 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for ff46acbd2fe1acdeb7bcecd7dc73b3b8: Waiting for close lock at 1731528236447Running coprocessor pre-close hooks at 1731528236447Disabling compacts and flushes for region at 1731528236447Disabling writes for close at 1731528236447Writing region close event to WAL at 1731528236448 (+1 ms)Running coprocessor post-close hooks at 1731528236454 (+6 ms)Closed at 1731528236454 2024-11-13T20:03:56,456 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:56,456 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:03:56,456 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=ff46acbd2fe1acdeb7bcecd7dc73b3b8, regionState=CLOSED 2024-11-13T20:03:56,457 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:56,457 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a. 
2024-11-13T20:03:56,457 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 6907fcc40f0202bda334fd38c624294a: Waiting for close lock at 1731528236451Running coprocessor pre-close hooks at 1731528236451Disabling compacts and flushes for region at 1731528236451Disabling writes for close at 1731528236451Writing region close event to WAL at 1731528236452 (+1 ms)Running coprocessor post-close hooks at 1731528236457 (+5 ms)Closed at 1731528236457 2024-11-13T20:03:56,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:03:56,459 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:56,459 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=6907fcc40f0202bda334fd38c624294a, regionState=CLOSED 2024-11-13T20:03:56,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-13T20:03:56,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:03:56,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure ff46acbd2fe1acdeb7bcecd7dc73b3b8, server=54d0784d1943,33333,1731528161197 in 167 msec 2024-11-13T20:03:56,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ff46acbd2fe1acdeb7bcecd7dc73b3b8, UNASSIGN in 174 msec 2024-11-13T20:03:56,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-13T20:03:56,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 6907fcc40f0202bda334fd38c624294a, server=54d0784d1943,36439,1731528160987 in 166 msec 2024-11-13T20:03:56,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-13T20:03:56,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6907fcc40f0202bda334fd38c624294a, UNASSIGN in 177 msec 2024-11-13T20:03:56,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-13T20:03:56,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 183 msec 2024-11-13T20:03:56,472 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528236471"}]},"ts":"1731528236471"} 2024-11-13T20:03:56,474 
INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-13T20:03:56,474 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-13T20:03:56,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 197 msec 2024-11-13T20:03:56,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T20:03:56,595 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-13T20:03:56,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-13T20:03:56,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,599 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-13T20:03:56,600 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,602 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-13T20:03:56,605 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:56,605 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:56,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T20:03:56,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T20:03:56,608 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T20:03:56,608 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/recovered.edits] 2024-11-13T20:03:56,609 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/recovered.edits] 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T20:03:56,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:56,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-13T20:03:56,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:56,613 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-11-13T20:03:56,613 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:03:56,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:56,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:56,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:56,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:56,616 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/cf/9e60d098b4814f8599bb8c79ec7d2650 2024-11-13T20:03:56,617 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/cf/58a70592d94e49978edcfe0b5d10ddde 2024-11-13T20:03:56,620 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/recovered.edits/8.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8/recovered.edits/8.seqid 2024-11-13T20:03:56,620 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/recovered.edits/8.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a/recovered.edits/8.seqid 2024-11-13T20:03:56,621 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:56,621 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportWithResetTtl/6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:56,621 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-13T20:03:56,622 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-13T20:03:56,622 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-13T20:03:56,628 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113f398f14af2a44bb8b52142ddf17cef3b_ff46acbd2fe1acdeb7bcecd7dc73b3b8 2024-11-13T20:03:56,629 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113da0fca358efd48efb3f5d5845ec220e8_6907fcc40f0202bda334fd38c624294a 2024-11-13T20:03:56,630 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-13T20:03:56,633 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,636 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-13T20:03:56,639 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-13T20:03:56,640 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,641 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-11-13T20:03:56,641 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528236641"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:56,641 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528236641"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:56,644 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:03:56,644 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6907fcc40f0202bda334fd38c624294a, NAME => 'testExportWithResetTtl,,1731528214227.6907fcc40f0202bda334fd38c624294a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ff46acbd2fe1acdeb7bcecd7dc73b3b8, NAME => 'testExportWithResetTtl,1,1731528214227.ff46acbd2fe1acdeb7bcecd7dc73b3b8.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:03:56,644 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-13T20:03:56,644 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528236644"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:56,646 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-13T20:03:56,647 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T20:03:56,649 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 51 msec 2024-11-13T20:03:56,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-13T20:03:56,716 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-13T20:03:56,716 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-13T20:03:56,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-13T20:03:56,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:56,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T20:03:56,721 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528236721"}]},"ts":"1731528236721"} 2024-11-13T20:03:56,723 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
2024-11-13T20:03:56,724 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-13T20:03:56,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-13T20:03:56,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, UNASSIGN}] 2024-11-13T20:03:56,728 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, UNASSIGN 2024-11-13T20:03:56,728 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, UNASSIGN 2024-11-13T20:03:56,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=39f4207c186681deb167cd33afbd9984, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:56,729 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=12abba96fe9236990c9b91820b0bfa3e, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:56,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, UNASSIGN because future has completed 2024-11-13T20:03:56,732 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:56,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:03:56,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, UNASSIGN because future has completed 2024-11-13T20:03:56,733 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:03:56,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:03:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T20:03:56,885 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:56,885 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:56,885 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing 12abba96fe9236990c9b91820b0bfa3e, disabling compactions & flushes 2024-11-13T20:03:56,885 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:56,885 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:56,885 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. after waiting 0 ms 2024-11-13T20:03:56,886 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:56,887 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:56,887 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:03:56,887 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing 39f4207c186681deb167cd33afbd9984, disabling compactions & flushes 2024-11-13T20:03:56,887 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:56,887 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 2024-11-13T20:03:56,888 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. after waiting 0 ms 2024-11-13T20:03:56,888 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 
2024-11-13T20:03:56,891 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:03:56,892 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:56,892 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e. 2024-11-13T20:03:56,892 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for 12abba96fe9236990c9b91820b0bfa3e: Waiting for close lock at 1731528236885Running coprocessor pre-close hooks at 1731528236885Disabling compacts and flushes for region at 1731528236885Disabling writes for close at 1731528236885Writing region close event to WAL at 1731528236886 (+1 ms)Running coprocessor post-close hooks at 1731528236892 (+6 ms)Closed at 1731528236892 2024-11-13T20:03:56,892 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:03:56,893 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:03:56,893 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984. 
2024-11-13T20:03:56,893 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for 39f4207c186681deb167cd33afbd9984: Waiting for close lock at 1731528236887Running coprocessor pre-close hooks at 1731528236887Disabling compacts and flushes for region at 1731528236887Disabling writes for close at 1731528236888 (+1 ms)Writing region close event to WAL at 1731528236888Running coprocessor post-close hooks at 1731528236893 (+5 ms)Closed at 1731528236893 2024-11-13T20:03:56,894 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed 12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:56,895 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=12abba96fe9236990c9b91820b0bfa3e, regionState=CLOSED 2024-11-13T20:03:56,895 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed 39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:56,896 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=39f4207c186681deb167cd33afbd9984, regionState=CLOSED 2024-11-13T20:03:56,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:03:56,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:03:56,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-13T20:03:56,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure 12abba96fe9236990c9b91820b0bfa3e, server=54d0784d1943,39533,1731528161273 in 166 msec 2024-11-13T20:03:56,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=54 2024-11-13T20:03:56,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 39f4207c186681deb167cd33afbd9984, server=54d0784d1943,33333,1731528161197 in 166 msec 2024-11-13T20:03:56,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=12abba96fe9236990c9b91820b0bfa3e, UNASSIGN in 173 msec 2024-11-13T20:03:56,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=54, resume processing ppid=52 2024-11-13T20:03:56,903 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=39f4207c186681deb167cd33afbd9984, UNASSIGN in 174 msec 2024-11-13T20:03:56,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-13T20:03:56,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, 
ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 179 msec 2024-11-13T20:03:56,907 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528236907"}]},"ts":"1731528236907"} 2024-11-13T20:03:56,908 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-13T20:03:56,908 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-13T20:03:56,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 192 msec 2024-11-13T20:03:57,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T20:03:57,036 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T20:03:57,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-13T20:03:57,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,040 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-13T20:03:57,041 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-13T20:03:57,048 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:57,049 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,051 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T20:03:57,051 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T20:03:57,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T20:03:57,052 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/recovered.edits] 2024-11-13T20:03:57,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-11-13T20:03:57,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,053 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:03:57,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T20:03:57,053 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,053 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/recovered.edits] 2024-11-13T20:03:57,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-13T20:03:57,063 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/cf/10d7912102ed4c299ac30146b50b0007 2024-11-13T20:03:57,064 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/cf/3a729c3b994841309f7ef9de11de59df 2024-11-13T20:03:57,069 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e/recovered.edits/9.seqid 2024-11-13T20:03:57,070 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984/recovered.edits/9.seqid 2024-11-13T20:03:57,070 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:57,070 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithResetTtl/39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:57,071 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-13T20:03:57,071 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-13T20:03:57,072 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-13T20:03:57,078 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411130bab6062a62e4291909dbe858fee0b79_39f4207c186681deb167cd33afbd9984 2024-11-13T20:03:57,080 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113288c02e4b7e6497086e2d8d12834774f_12abba96fe9236990c9b91820b0bfa3e 2024-11-13T20:03:57,080 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-13T20:03:57,084 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,087 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-13T20:03:57,090 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-13T20:03:57,092 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,092 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
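The teardown traced here -- DisableTableProcedure pid=51, then DeleteTableProcedure pid=57 with HFileArchiver moving the region and MOB store files from data/default/... to archive/data/default/... before the table is removed from hbase:meta -- is driven by ordinary client Admin calls. A minimal client-side sketch of that sequence, using the table and snapshot names that appear in this trace and assuming a stock HBase 2.x client (the class name and connection boilerplate are illustrative, not part of the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportWithResetTtlTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);   // drives DisableTableProcedure (pid=51)
          }
          admin.deleteTable(tn);      // drives DeleteTableProcedure (pid=57)
          // Snapshot cleanup, matching the delete-snapshot requests later in the trace.
          admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
      }
    }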
2024-11-13T20:03:57,092 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528237092"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:57,092 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528237092"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:57,094 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:03:57,094 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 12abba96fe9236990c9b91820b0bfa3e, NAME => 'testtb-testExportWithResetTtl,,1731528210948.12abba96fe9236990c9b91820b0bfa3e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 39f4207c186681deb167cd33afbd9984, NAME => 'testtb-testExportWithResetTtl,1,1731528210948.39f4207c186681deb167cd33afbd9984.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:03:57,094 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-13T20:03:57,095 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528237094"}]},"ts":"9223372036854775807"} 2024-11-13T20:03:57,097 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-13T20:03:57,098 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T20:03:57,099 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 61 msec 2024-11-13T20:03:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-13T20:03:57,166 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-13T20:03:57,166 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T20:03:57,175 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-13T20:03:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-13T20:03:57,178 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-13T20:03:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-13T20:03:57,182 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-13T20:03:57,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-13T20:03:57,205 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=772 (was 757) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:36109 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:43562 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1565579540_1 at /127.0.0.1:41174 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:57634 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37853 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:33861 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 28350) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:41192 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2055 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1565579540_1 at /127.0.0.1:43524 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36109 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=781 (was 773) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=985 (was 1035), ProcessCount=19 (was 19), AvailableMemoryMB=990 (was 504) - AvailableMemoryMB LEAK? 
- 2024-11-13T20:03:57,206 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=772 is superior to 500 2024-11-13T20:03:57,226 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=772, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=985, ProcessCount=19, AvailableMemoryMB=989 2024-11-13T20:03:57,226 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=772 is superior to 500 2024-11-13T20:03:57,228 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:03:57,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:03:57,230 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:03:57,230 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-13T20:03:57,231 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:03:57,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T20:03:57,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741968_1144 (size=443) 2024-11-13T20:03:57,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741968_1144 (size=443) 2024-11-13T20:03:57,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741968_1144 (size=443) 2024-11-13T20:03:57,248 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 761b60efd5b7969e107760a98d14345f, NAME => 'testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER 
=> 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:57,248 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 12ebf52f94b050633db41166224e4585, NAME => 'testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:57,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741970_1146 (size=68) 2024-11-13T20:03:57,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741970_1146 (size=68) 2024-11-13T20:03:57,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741969_1145 (size=68) 2024-11-13T20:03:57,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741970_1146 (size=68) 2024-11-13T20:03:57,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741969_1145 (size=68) 2024-11-13T20:03:57,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741969_1145 (size=68) 2024-11-13T20:03:57,279 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 761b60efd5b7969e107760a98d14345f, disabling compactions & flushes 2024-11-13T20:03:57,280 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
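The create request for 'testtb-testExportFileSystemState' specifies a single MOB-enabled column family ('cf' with MOB_THRESHOLD => '0', IS_MOB => 'true', VERSIONS => '1', BLOOMFILTER => 'ROW'), and the two RegionOpenAndInit workers show a split at row key '1'. A rough client-side equivalent using the HBase 2.x descriptor builders (the wrapper class, connection handling, and any options not listed in the descriptor above are assumptions):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateMobExportTable {
      static void create(Admin admin) throws Exception {
        // 'cf' mirrors the descriptor in the create request: MOB enabled with a zero
        // threshold so every cell is written as a MOB file, one version, ROW blooms.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L)
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
        // One split key produces the two regions seen above: ['', '1') and ['1', '').
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
      }
    }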
2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. after waiting 0 ms 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 12ebf52f94b050633db41166224e4585, disabling compactions & flushes 2024-11-13T20:03:57,280 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:57,280 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 761b60efd5b7969e107760a98d14345f: Waiting for close lock at 1731528237280Disabling compacts and flushes for region at 1731528237280Disabling writes for close at 1731528237280Writing region close event to WAL at 1731528237280Closed at 1731528237280 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. after waiting 0 ms 2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,280 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 
2024-11-13T20:03:57,280 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 12ebf52f94b050633db41166224e4585: Waiting for close lock at 1731528237280Disabling compacts and flushes for region at 1731528237280Disabling writes for close at 1731528237280Writing region close event to WAL at 1731528237280Closed at 1731528237280 2024-11-13T20:03:57,283 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:03:57,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731528237283"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528237283"}]},"ts":"1731528237283"} 2024-11-13T20:03:57,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731528237283"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528237283"}]},"ts":"1731528237283"} 2024-11-13T20:03:57,286 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:03:57,287 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:03:57,287 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528237287"}]},"ts":"1731528237287"} 2024-11-13T20:03:57,289 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-13T20:03:57,290 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:03:57,291 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:03:57,291 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:03:57,292 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:03:57,292 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:03:57,292 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:03:57,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, ASSIGN}] 2024-11-13T20:03:57,293 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, ASSIGN 2024-11-13T20:03:57,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, ASSIGN 2024-11-13T20:03:57,295 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:03:57,295 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:03:57,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T20:03:57,446 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
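After the balancer produces its plan, the two TransitRegionStateProcedures (pid=59/60) move the new regions to OPENING on 54d0784d1943,33333 and 54d0784d1943,39533 while the client keeps asking the master whether pid=58 is done. A sketch (helper class, polling interval, and the printed output format are assumptions, not test code) of how a client can wait for the table and inspect where its regions landed:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class WaitForAssignments {
      static void report(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        try (Admin admin = conn.getAdmin()) {
          // Admin.createTable() already blocks until CreateTableProcedure (pid=58)
          // finishes; this explicit poll mirrors the "is procedure done" RPCs above.
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
        }
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expected here: 761b60efd5b7969e107760a98d14345f and
            // 12ebf52f94b050633db41166224e4585 on the two region servers named above.
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }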
2024-11-13T20:03:57,447 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=761b60efd5b7969e107760a98d14345f, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:57,447 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=12ebf52f94b050633db41166224e4585, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:57,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, ASSIGN because future has completed 2024-11-13T20:03:57,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:03:57,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, ASSIGN because future has completed 2024-11-13T20:03:57,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:03:57,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T20:03:57,610 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,611 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 12ebf52f94b050633db41166224e4585, NAME => 'testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:03:57,611 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. service=AccessControlService 2024-11-13T20:03:57,611 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:03:57,611 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,612 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:57,612 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,612 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,612 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:57,612 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 761b60efd5b7969e107760a98d14345f, NAME => 'testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:03:57,612 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. service=AccessControlService 2024-11-13T20:03:57,613 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
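Both region opens register AccessControlService and load org.apache.hadoop.hbase.security.access.AccessController at priority 536870911, which matches HBase's system-coprocessor priority (Integer.MAX_VALUE / 4), as expected for the secure export test. For reference, this coprocessor is normally wired in through hbase-site.xml; the sketch below shows the commonly documented keys programmatically (the Kerberos and RPC settings a fully secure cluster also needs are omitted, and nothing here is taken from this test's own configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class SecureClusterConf {
      static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.setBoolean("hbase.security.authorization", true);
        // Load the AccessController on the master, every region, and every region
        // server; this is what makes it appear as a "System coprocessor" when a
        // region such as the ones above is opened.
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }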
2024-11-13T20:03:57,613 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,613 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:03:57,613 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,613 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,613 INFO [StoreOpener-12ebf52f94b050633db41166224e4585-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,614 INFO [StoreOpener-761b60efd5b7969e107760a98d14345f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,615 INFO [StoreOpener-12ebf52f94b050633db41166224e4585-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 12ebf52f94b050633db41166224e4585 columnFamilyName cf 2024-11-13T20:03:57,616 INFO [StoreOpener-761b60efd5b7969e107760a98d14345f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 761b60efd5b7969e107760a98d14345f columnFamilyName cf 2024-11-13T20:03:57,616 DEBUG [StoreOpener-12ebf52f94b050633db41166224e4585-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:57,616 DEBUG [StoreOpener-761b60efd5b7969e107760a98d14345f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:57,616 INFO [StoreOpener-12ebf52f94b050633db41166224e4585-1 {}] regionserver.HStore(327): Store=12ebf52f94b050633db41166224e4585/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:57,617 INFO [StoreOpener-761b60efd5b7969e107760a98d14345f-1 {}] regionserver.HStore(327): Store=761b60efd5b7969e107760a98d14345f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:03:57,617 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,617 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,618 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,619 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,619 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 
{event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,622 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:57,623 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:03:57,623 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 12ebf52f94b050633db41166224e4585; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59470531, jitterRate=-0.11382003128528595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:57,623 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,623 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 761b60efd5b7969e107760a98d14345f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71488717, jitterRate=0.06526489555835724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:03:57,623 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:57,624 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 761b60efd5b7969e107760a98d14345f: Running coprocessor pre-open hook at 1731528237613Writing region info on filesystem at 1731528237613Initializing all the Stores at 1731528237614 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528237614Cleaning up temporary data from old regions at 1731528237619 (+5 ms)Running coprocessor post-open hooks at 1731528237623 (+4 ms)Region opened successfully at 1731528237624 (+1 ms) 2024-11-13T20:03:57,624 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 12ebf52f94b050633db41166224e4585: Running coprocessor pre-open hook at 1731528237612Writing region info on 
filesystem at 1731528237612Initializing all the Stores at 1731528237613 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528237613Cleaning up temporary data from old regions at 1731528237618 (+5 ms)Running coprocessor post-open hooks at 1731528237623 (+5 ms)Region opened successfully at 1731528237624 (+1 ms) 2024-11-13T20:03:57,625 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f., pid=62, masterSystemTime=1731528237609 2024-11-13T20:03:57,625 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585., pid=61, masterSystemTime=1731528237607 2024-11-13T20:03:57,627 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,627 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:57,627 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=12ebf52f94b050633db41166224e4585, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:03:57,627 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:57,627 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
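The region open journal above prints the full definition of column family 'cf' (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)'). Purely as an illustration, not the test's actual code, a client-side table descriptor matching those attributes and the two-region split seen here (split key '1') could be built roughly like this; the class name and connection setup are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family 'cf' mirroring the attributes printed in the region-open journal:
          // one version, MOB enabled with threshold 0, ROW bloom filter, 64 KB blocks.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setColumnFamily(cf)
              .build();
          // A single split key "1" yields the two regions seen in the log: [, 1) and [1, ).
          admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }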
2024-11-13T20:03:57,628 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=761b60efd5b7969e107760a98d14345f, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:03:57,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:03:57,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:03:57,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=60 2024-11-13T20:03:57,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273 in 181 msec 2024-11-13T20:03:57,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=59 2024-11-13T20:03:57,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197 in 177 msec 2024-11-13T20:03:57,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, ASSIGN in 340 msec 2024-11-13T20:03:57,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=58 2024-11-13T20:03:57,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, ASSIGN in 341 msec 2024-11-13T20:03:57,636 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:03:57,637 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528237636"}]},"ts":"1731528237636"} 2024-11-13T20:03:57,638 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-13T20:03:57,639 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:03:57,639 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-13T20:03:57,642 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
2024-11-13T20:03:57,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:03:57,647 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:57,647 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:57,647 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:57,647 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:03:57,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 419 msec 2024-11-13T20:03:57,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T20:03:57,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T20:03:57,856 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:57,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-13T20:03:57,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
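The "jenkins: RWXCA" entry written by PermissionStorage above, and the ZKPermissionWatcher cache updates that follow, come from the AccessController applying the table creator's default grant after CreateTableProcedure completes. As a sketch only (not something this test issues), an equivalent explicit grant from a client could use AccessControlClient; the user and table names are taken from the log, everything else is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Grant the same RWXCA set the log shows being stored for the table creator.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemState"),
              "jenkins",
              null,   // whole table: no specific column family
              null,   // no specific qualifier
              Permission.Action.READ,
              Permission.Action.WRITE,
              Permission.Action.EXEC,
              Permission.Action.CREATE,
              Permission.Action.ADMIN);
        }
      }
    }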
2024-11-13T20:03:57,860 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:57,862 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:57,868 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:57,874 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:57,877 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:03:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528237877 (current time:1731528237877). 2024-11-13T20:03:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:03:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-13T20:03:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:03:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54bbf110, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:57,879 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:57,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:57,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:57,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d052c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:57,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:57,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,880 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:57,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd02993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:57,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:57,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:57,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:57,885 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
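The cluster-id request and hbase:meta location fetch in the preceding entries are the normal bootstrap performed whenever a new client Connection is created (here done internally by the master while validating the snapshot request). A minimal, purely illustrative client-side equivalent, with an assumed class name and default configuration, would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterConnectionProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Creating a Connection triggers the registry lookups seen in the log:
        // cluster id fetch, then the hbase:meta region location.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }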
2024-11-13T20:03:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,885 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e982e70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:57,886 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:57,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:57,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:57,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c864de1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:57,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:57,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,888 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:57,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fd32123, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:57,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:57,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:57,890 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:57,890 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:57,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:03:57,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:57,893 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:57,894 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:03:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:57,894 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T20:03:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
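The snapshot request logged by MasterRpcServices and validated by SnapshotDescriptionUtils above originates from a client-side Admin.snapshot call. A hedged sketch of that call, reusing the snapshot name, table name, and FLUSH type reported in the log (connection setup and class name are assumptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Matches the request logged by MasterRpcServices:
          // { ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH }
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }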
2024-11-13T20:03:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:03:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-13T20:03:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T20:03:57,897 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:03:57,898 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:03:57,901 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:03:57,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741971_1147 (size=170) 2024-11-13T20:03:57,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741971_1147 (size=170) 2024-11-13T20:03:57,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741971_1147 (size=170) 2024-11-13T20:03:57,911 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:03:57,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585}] 2024-11-13T20:03:57,912 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:57,912 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,005 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T20:03:58,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-13T20:03:58,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 12ebf52f94b050633db41166224e4585: 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for 761b60efd5b7969e107760a98d14345f: 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. for emptySnaptb0-testExportFileSystemState completed. 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. for emptySnaptb0-testExportFileSystemState completed. 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-13T20:03:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:03:58,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:58,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741972_1148 (size=71) 2024-11-13T20:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741973_1149 (size=71) 2024-11-13T20:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741972_1148 (size=71) 2024-11-13T20:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741972_1148 (size=71) 2024-11-13T20:03:58,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741973_1149 (size=71) 2024-11-13T20:03:58,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741973_1149 (size=71) 2024-11-13T20:03:58,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:58,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-13T20:03:58,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-13T20:03:58,076 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,076 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
2024-11-13T20:03:58,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-13T20:03:58,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-13T20:03:58,078 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,078 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 in 166 msec 2024-11-13T20:03:58,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-11-13T20:03:58,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f in 168 msec 2024-11-13T20:03:58,081 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:58,081 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:58,082 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:58,082 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:58,083 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:58,083 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:03:58,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741974_1150 (size=63) 2024-11-13T20:03:58,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741974_1150 (size=63) 2024-11-13T20:03:58,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741974_1150 (size=63) 2024-11-13T20:03:58,094 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:58,094 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-13T20:03:58,095 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-13T20:03:58,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741975_1151 (size=653) 2024-11-13T20:03:58,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741975_1151 (size=653) 2024-11-13T20:03:58,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741975_1151 (size=653) 2024-11-13T20:03:58,143 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:58,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:58,151 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-13T20:03:58,153 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=63, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:58,153 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-13T20:03:58,156 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 258 msec 2024-11-13T20:03:58,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T20:03:58,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T20:03:58,221 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:03:58,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:03:58,225 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:58,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-13T20:03:58,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
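The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above correspond to mutations sent with durability SKIP_WAL. A small illustrative sketch follows; only the table and column family names come from the log, while the row, qualifier, and value are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          // SKIP_WAL is what produces the "with WAL disabled" warning on the region server.
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }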
2024-11-13T20:03:58,228 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:03:58,229 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:58,236 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:58,244 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:03:58,247 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:03:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528238247 (current time:1731528238247). 2024-11-13T20:03:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:03:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-13T20:03:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:03:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@145906ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:58,249 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14398104, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:58,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,251 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:58,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5db3815a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:58,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:58,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:58,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:58,254 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:58,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:03:58,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:58,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,255 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:58,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e7a536, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:58,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:03:58,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:03:58,257 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:03:58,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:03:58,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:03:58,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a170e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:58,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:03:58,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:03:58,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,259 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:03:58,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a0a27ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:03:58,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:03:58,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:03:58,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:58,262 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:58,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:03:58,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:03:58,268 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:03:58,269 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:03:58,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:03:58,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:03:58,270 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:03:58,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T20:03:58,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-13T20:03:58,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:03:58,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-13T20:03:58,273 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:03:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T20:03:58,274 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:03:58,276 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:03:58,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741976_1152 (size=165) 2024-11-13T20:03:58,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741976_1152 (size=165) 2024-11-13T20:03:58,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741976_1152 (size=165) 2024-11-13T20:03:58,301 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:03:58,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585}] 2024-11-13T20:03:58,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,303 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T20:03:58,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-13T20:03:58,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-13T20:03:58,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:03:58,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:03:58,457 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing 761b60efd5b7969e107760a98d14345f 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-13T20:03:58,457 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 12ebf52f94b050633db41166224e4585 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-13T20:03:58,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f is 71, key is 00fbb4bff5480d40d30a9cc805aa2dd8/cf:q/1731528238221/Put/seqid=0 2024-11-13T20:03:58,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 is 71, key is 208c1248396ad3e6ffefd87c8888e048/cf:q/1731528238224/Put/seqid=0 2024-11-13T20:03:58,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741977_1153 (size=5171) 2024-11-13T20:03:58,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741977_1153 (size=5171) 2024-11-13T20:03:58,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741977_1153 (size=5171) 2024-11-13T20:03:58,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:58,500 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/.tmp/cf/1914b3cc51a34b418c62c050249453df, store: [table=testtb-testExportFileSystemState family=cf region=761b60efd5b7969e107760a98d14345f] 2024-11-13T20:03:58,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/.tmp/cf/1914b3cc51a34b418c62c050249453df is 209, key is 09a6e0b7dc48e759a593621e0e6008701/cf:q/1731528238221/Put/seqid=0 2024-11-13T20:03:58,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741978_1154 (size=8102) 2024-11-13T20:03:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741978_1154 (size=8102) 2024-11-13T20:03:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741978_1154 (size=8102) 2024-11-13T20:03:58,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:58,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741979_1155 (size=6121) 2024-11-13T20:03:58,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741979_1155 (size=6121) 2024-11-13T20:03:58,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741979_1155 (size=6121) 2024-11-13T20:03:58,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/.tmp/cf/1914b3cc51a34b418c62c050249453df 2024-11-13T20:03:58,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/.tmp/cf/63f30c7091e44bcf81e6beec5967e997, store: [table=testtb-testExportFileSystemState family=cf region=12ebf52f94b050633db41166224e4585] 2024-11-13T20:03:58,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/.tmp/cf/63f30c7091e44bcf81e6beec5967e997 is 209, key is 118c4eaf69891790c7c2eff8b94f06e45/cf:q/1731528238224/Put/seqid=0 2024-11-13T20:03:58,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/.tmp/cf/1914b3cc51a34b418c62c050249453df as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df 2024-11-13T20:03:58,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741980_1156 (size=14794) 2024-11-13T20:03:58,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741980_1156 (size=14794) 2024-11-13T20:03:58,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741980_1156 (size=14794) 2024-11-13T20:03:58,521 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/.tmp/cf/63f30c7091e44bcf81e6beec5967e997 2024-11-13T20:03:58,522 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df, entries=4, sequenceid=6, filesize=6.0 K 2024-11-13T20:03:58,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): 
Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 761b60efd5b7969e107760a98d14345f in 67ms, sequenceid=6, compaction requested=false 2024-11-13T20:03:58,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-13T20:03:58,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for 761b60efd5b7969e107760a98d14345f: 2024-11-13T20:03:58,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. for snaptb0-testExportFileSystemState completed. 2024-11-13T20:03:58,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-13T20:03:58,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:58,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df] hfiles 2024-11-13T20:03:58,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df for snapshot=snaptb0-testExportFileSystemState 2024-11-13T20:03:58,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/.tmp/cf/63f30c7091e44bcf81e6beec5967e997 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997 2024-11-13T20:03:58,535 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997, entries=46, sequenceid=6, filesize=14.4 K 2024-11-13T20:03:58,536 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 12ebf52f94b050633db41166224e4585 in 79ms, 
sequenceid=6, compaction requested=false 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 12ebf52f94b050633db41166224e4585: 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. for snaptb0-testExportFileSystemState completed. 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997] hfiles 2024-11-13T20:03:58,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997 for snapshot=snaptb0-testExportFileSystemState 2024-11-13T20:03:58,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741981_1157 (size=110) 2024-11-13T20:03:58,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741981_1157 (size=110) 2024-11-13T20:03:58,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741981_1157 (size=110) 2024-11-13T20:03:58,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
2024-11-13T20:03:58,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-13T20:03:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-13T20:03:58,546 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,546 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 761b60efd5b7969e107760a98d14345f in 245 msec 2024-11-13T20:03:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741982_1158 (size=110) 2024-11-13T20:03:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741982_1158 (size=110) 2024-11-13T20:03:58,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741982_1158 (size=110) 2024-11-13T20:03:58,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 
2024-11-13T20:03:58,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-13T20:03:58,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-13T20:03:58,557 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,558 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=68, resume processing ppid=66 2024-11-13T20:03:58,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 12ebf52f94b050633db41166224e4585 in 256 msec 2024-11-13T20:03:58,560 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:03:58,561 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:03:58,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:03:58,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:03:58,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:03:58,563 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f] hfiles 2024-11-13T20:03:58,563 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 2024-11-13T20:03:58,563 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f 2024-11-13T20:03:58,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741983_1159 (size=294) 2024-11-13T20:03:58,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741983_1159 (size=294) 2024-11-13T20:03:58,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741983_1159 (size=294) 2024-11-13T20:03:58,575 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:03:58,576 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-13T20:03:58,577 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T20:03:58,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741984_1160 (size=963) 2024-11-13T20:03:58,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741984_1160 (size=963) 2024-11-13T20:03:58,587 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741984_1160 (size=963) 2024-11-13T20:03:58,591 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:03:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T20:03:58,602 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:03:58,603 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T20:03:58,604 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:03:58,604 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-13T20:03:58,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 333 msec 2024-11-13T20:03:58,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T20:03:58,905 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T20:03:58,905 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905 2024-11-13T20:03:58,905 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:58,937 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, 
inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:03:58,937 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T20:03:58,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:03:58,946 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T20:03:58,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741985_1161 (size=165) 2024-11-13T20:03:58,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741985_1161 (size=165) 2024-11-13T20:03:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741985_1161 (size=165) 2024-11-13T20:03:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741986_1162 (size=963) 2024-11-13T20:03:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741986_1162 (size=963) 2024-11-13T20:03:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741986_1162 (size=963) 2024-11-13T20:03:59,019 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:59,019 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:03:59,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-5541854827059396618.jar 2024-11-13T20:04:00,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,268 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-6386025162896300659.jar 2024-11-13T20:04:00,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:00,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:04:00,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:04:00,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:04:00,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:04:00,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:04:00,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:04:00,271 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:04:00,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:04:00,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:04:00,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:04:00,272 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:04:00,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:00,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:00,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:00,274 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:00,274 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:00,274 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:00,274 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:00,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741987_1163 (size=131440) 2024-11-13T20:04:00,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741987_1163 (size=131440) 2024-11-13T20:04:00,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741987_1163 (size=131440) 2024-11-13T20:04:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T20:04:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T20:04:00,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T20:04:00,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T20:04:00,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T20:04:00,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T20:04:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-13T20:04:00,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-13T20:04:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-13T20:04:00,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-13T20:04:00,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to 
blk_1073741990_1166 (size=903730) 2024-11-13T20:04:00,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741990_1166 (size=903730) 2024-11-13T20:04:00,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741990_1166 (size=903730) 2024-11-13T20:04:00,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T20:04:00,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T20:04:00,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T20:04:00,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741992_1168 (size=1877034) 2024-11-13T20:04:00,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741992_1168 (size=1877034) 2024-11-13T20:04:00,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741992_1168 (size=1877034) 2024-11-13T20:04:00,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741993_1169 (size=77835) 2024-11-13T20:04:00,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741993_1169 (size=77835) 2024-11-13T20:04:00,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741993_1169 (size=77835) 2024-11-13T20:04:00,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741994_1170 (size=30949) 2024-11-13T20:04:00,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741994_1170 (size=30949) 2024-11-13T20:04:00,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741994_1170 (size=30949) 2024-11-13T20:04:00,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741995_1171 (size=1597327) 2024-11-13T20:04:00,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741995_1171 (size=1597327) 2024-11-13T20:04:00,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741995_1171 (size=1597327) 2024-11-13T20:04:01,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0002_000001 (auth:SIMPLE) from 127.0.0.1:60312 2024-11-13T20:04:01,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000001/launch_container.sh] 2024-11-13T20:04:01,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000001/container_tokens] 2024-11-13T20:04:01,217 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0002/container_1731528172678_0002_01_000001/sysfs] 2024-11-13T20:04:01,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741996_1172 (size=4695811) 2024-11-13T20:04:01,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741996_1172 (size=4695811) 2024-11-13T20:04:01,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741996_1172 (size=4695811) 2024-11-13T20:04:01,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741997_1173 (size=232957) 2024-11-13T20:04:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741997_1173 (size=232957) 2024-11-13T20:04:01,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741997_1173 (size=232957) 2024-11-13T20:04:01,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741998_1174 (size=127628) 2024-11-13T20:04:01,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741998_1174 (size=127628) 2024-11-13T20:04:01,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741998_1174 (size=127628) 2024-11-13T20:04:01,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741999_1175 (size=20406) 2024-11-13T20:04:01,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741999_1175 (size=20406) 2024-11-13T20:04:01,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741999_1175 (size=20406) 2024-11-13T20:04:01,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742000_1176 (size=5175431) 2024-11-13T20:04:01,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42549 is added to blk_1073742000_1176 (size=5175431) 2024-11-13T20:04:01,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742000_1176 (size=5175431) 2024-11-13T20:04:01,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742001_1177 (size=6424542) 2024-11-13T20:04:01,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742001_1177 (size=6424542) 2024-11-13T20:04:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742001_1177 (size=6424542) 2024-11-13T20:04:01,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742002_1178 (size=217634) 2024-11-13T20:04:01,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742002_1178 (size=217634) 2024-11-13T20:04:01,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742002_1178 (size=217634) 2024-11-13T20:04:01,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742003_1179 (size=1832290) 2024-11-13T20:04:01,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742003_1179 (size=1832290) 2024-11-13T20:04:01,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742003_1179 (size=1832290) 2024-11-13T20:04:01,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742004_1180 (size=322274) 2024-11-13T20:04:01,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742004_1180 (size=322274) 2024-11-13T20:04:01,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742004_1180 (size=322274) 2024-11-13T20:04:01,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742005_1181 (size=503880) 2024-11-13T20:04:01,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742005_1181 (size=503880) 2024-11-13T20:04:01,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742005_1181 (size=503880) 2024-11-13T20:04:01,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742006_1182 (size=440392) 2024-11-13T20:04:01,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742006_1182 (size=440392) 2024-11-13T20:04:01,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742006_1182 (size=440392) 2024-11-13T20:04:01,799 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742007_1183 (size=29229) 2024-11-13T20:04:01,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742007_1183 (size=29229) 2024-11-13T20:04:01,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742007_1183 (size=29229) 2024-11-13T20:04:01,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742008_1184 (size=24096) 2024-11-13T20:04:01,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742008_1184 (size=24096) 2024-11-13T20:04:01,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742008_1184 (size=24096) 2024-11-13T20:04:01,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742009_1185 (size=111872) 2024-11-13T20:04:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742009_1185 (size=111872) 2024-11-13T20:04:01,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742009_1185 (size=111872) 2024-11-13T20:04:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742010_1186 (size=45609) 2024-11-13T20:04:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742010_1186 (size=45609) 2024-11-13T20:04:01,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742010_1186 (size=45609) 2024-11-13T20:04:01,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742011_1187 (size=136454) 2024-11-13T20:04:01,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742011_1187 (size=136454) 2024-11-13T20:04:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742011_1187 (size=136454) 2024-11-13T20:04:01,901 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
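The JobResourceUploader warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") points at the MapReduce Job#setJar / Job#setJarByClass API. A minimal, hypothetical sketch of how a driver avoids that warning; the class name and jar path here are illustrative, not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
      public static Job newJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Either point at an explicit jar on the local filesystem...
        // job.setJar("/path/to/job.jar");
        // ...or let Hadoop locate the jar that contains a known driver class:
        job.setJarByClass(JobJarExample.class);
        return job;
      }
    }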
2024-11-13T20:04:01,904 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-13T20:04:01,908 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.4 K 2024-11-13T20:04:01,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742012_1188 (size=726) 2024-11-13T20:04:01,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742012_1188 (size=726) 2024-11-13T20:04:01,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742012_1188 (size=726) 2024-11-13T20:04:01,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742013_1189 (size=15) 2024-11-13T20:04:01,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742013_1189 (size=15) 2024-11-13T20:04:01,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742013_1189 (size=15) 2024-11-13T20:04:01,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742014_1190 (size=303734) 2024-11-13T20:04:01,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742014_1190 (size=303734) 2024-11-13T20:04:01,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742014_1190 (size=303734) 2024-11-13T20:04:01,992 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:04:01,992 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:04:02,038 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:04:02,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0003_000001 (auth:SIMPLE) from 127.0.0.1:57944 2024-11-13T20:04:08,849 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
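The AbstractLeafQueue warnings in the entry above ("maximum-am-resource-percent is insufficient to start a single application in queue...") come from the CapacityScheduler's ApplicationMaster resource limit. A hedged sketch of relaxing that limit on the Configuration handed to a test cluster before it starts, assuming the stock property name yarn.scheduler.capacity.maximum-am-resource-percent:

    import org.apache.hadoop.conf.Configuration;

    public class AmResourceLimitExample {
      // Returns a copy of the base configuration with a more generous AM limit.
      public static Configuration withRelaxedAmLimit(Configuration base) {
        Configuration conf = new Configuration(base);
        // Allow up to 50% of queue capacity to be consumed by ApplicationMasters.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        return conf;
      }
    }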
2024-11-13T20:04:09,496 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0003_000001 (auth:SIMPLE) from 127.0.0.1:36482 2024-11-13T20:04:09,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742015_1191 (size=349384) 2024-11-13T20:04:09,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742015_1191 (size=349384) 2024-11-13T20:04:09,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742015_1191 (size=349384) 2024-11-13T20:04:11,881 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0003_000001 (auth:SIMPLE) from 127.0.0.1:50212 2024-11-13T20:04:16,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742016_1192 (size=14794) 2024-11-13T20:04:16,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742016_1192 (size=14794) 2024-11-13T20:04:16,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742016_1192 (size=14794) 2024-11-13T20:04:16,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742017_1193 (size=8102) 2024-11-13T20:04:16,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742017_1193 (size=8102) 2024-11-13T20:04:16,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742017_1193 (size=8102) 2024-11-13T20:04:16,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742018_1194 (size=6121) 2024-11-13T20:04:16,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742018_1194 (size=6121) 2024-11-13T20:04:16,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742018_1194 (size=6121) 2024-11-13T20:04:16,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742019_1195 (size=5171) 2024-11-13T20:04:16,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742019_1195 (size=5171) 2024-11-13T20:04:16,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742019_1195 (size=5171) 2024-11-13T20:04:16,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742020_1196 (size=17462) 2024-11-13T20:04:16,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742020_1196 (size=17462) 2024-11-13T20:04:16,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742020_1196 (size=17462) 
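Each addStoredBlock trio above records the same block being acknowledged by three datanodes, i.e. the default replication factor of 3 being met as the job's files land in DFS. A small illustrative sketch (the file path is a placeholder) for checking a file's replication and block placement from a client:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockPlacementExample {
      public static void printPlacement(Configuration conf, String file) throws Exception {
        try (FileSystem fs = FileSystem.get(conf)) {
          FileStatus status = fs.getFileStatus(new Path(file));
          System.out.println("replication=" + status.getReplication());
          for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
            // Hosts that hold a replica of this block.
            System.out.println("block hosts: " + String.join(",", loc.getHosts()));
          }
        }
      }
    }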
2024-11-13T20:04:16,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742021_1197 (size=465) 2024-11-13T20:04:16,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742021_1197 (size=465) 2024-11-13T20:04:16,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742021_1197 (size=465) 2024-11-13T20:04:16,732 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000002/launch_container.sh] 2024-11-13T20:04:16,732 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000002/container_tokens] 2024-11-13T20:04:16,732 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000002/sysfs] 2024-11-13T20:04:16,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742022_1198 (size=17462) 2024-11-13T20:04:16,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742022_1198 (size=17462) 2024-11-13T20:04:16,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742022_1198 (size=17462) 2024-11-13T20:04:16,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742023_1199 (size=349384) 2024-11-13T20:04:16,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742023_1199 (size=349384) 2024-11-13T20:04:16,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742023_1199 (size=349384) 2024-11-13T20:04:17,236 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0003_000001 (auth:SIMPLE) from 127.0.0.1:50224 2024-11-13T20:04:18,321 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:04:18,322 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
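The ExportSnapshot entries around this point ("Finalize the Snapshot Export", then verification and "Export Completed" below) are the tail end of the export job that was split and submitted earlier. A hedged sketch of driving the same tool programmatically, assuming the standard -snapshot/-copy-to/-overwrite options; the destination URI is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Run the export tool with the snapshot name seen in this log and a
        // placeholder destination filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://namenode:8020/export-test/snapshots",
            "-overwrite"
        });
        System.exit(rc);
      }
    }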
2024-11-13T20:04:18,331 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-13T20:04:18,331 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:04:18,332 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:04:18,332 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T20:04:18,332 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-13T20:04:18,332 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-13T20:04:18,332 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T20:04:18,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-13T20:04:18,333 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528238905/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-13T20:04:18,342 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-13T20:04:18,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,347 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528258347"}]},"ts":"1731528258347"} 2024-11-13T20:04:18,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T20:04:18,352 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-13T20:04:18,352 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-13T20:04:18,353 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-13T20:04:18,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, UNASSIGN}] 2024-11-13T20:04:18,357 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, UNASSIGN 2024-11-13T20:04:18,357 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, UNASSIGN 2024-11-13T20:04:18,359 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=761b60efd5b7969e107760a98d14345f, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:04:18,361 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=12ebf52f94b050633db41166224e4585, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:04:18,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, UNASSIGN because future has completed 2024-11-13T20:04:18,364 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:04:18,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:04:18,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, UNASSIGN because future has completed 2024-11-13T20:04:18,365 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:04:18,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:04:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T20:04:18,455 ERROR [ProcedureDispatcherTimeoutThread {}] 
procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. Queue: [containedObject=54d0784d1943,33333,1731528161197, timeout=1731528258516, delay=60, operations=[pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197], containedObject=54d0784d1943,39533,1731528161273, timeout=1731528258522, delay=66, operations=[pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273]] 2024-11-13T20:04:18,517 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 761b60efd5b7969e107760a98d14345f 2024-11-13T20:04:18,517 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:04:18,517 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 761b60efd5b7969e107760a98d14345f, disabling compactions & flushes 2024-11-13T20:04:18,518 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:04:18,518 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 2024-11-13T20:04:18,518 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. after waiting 0 ms 2024-11-13T20:04:18,518 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
2024-11-13T20:04:18,523 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 12ebf52f94b050633db41166224e4585 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 12ebf52f94b050633db41166224e4585, disabling compactions & flushes 2024-11-13T20:04:18,523 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. after waiting 0 ms 2024-11-13T20:04:18,523 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 2024-11-13T20:04:18,524 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:04:18,524 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f. 
2024-11-13T20:04:18,524 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 761b60efd5b7969e107760a98d14345f: Waiting for close lock at 1731528258517Running coprocessor pre-close hooks at 1731528258517Disabling compacts and flushes for region at 1731528258517Disabling writes for close at 1731528258518 (+1 ms)Writing region close event to WAL at 1731528258519 (+1 ms)Running coprocessor post-close hooks at 1731528258524 (+5 ms)Closed at 1731528258524 2024-11-13T20:04:18,527 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=761b60efd5b7969e107760a98d14345f, regionState=CLOSED 2024-11-13T20:04:18,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:04:18,531 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:04:18,531 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:04:18,531 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585. 
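The DisableTableProcedure and region-close entries above are the server side of a single client call. A minimal sketch of that call, assuming cluster settings are picked up from an hbase-site.xml on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);  // blocks until the table's regions are closed
          }
        }
      }
    }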
2024-11-13T20:04:18,532 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 12ebf52f94b050633db41166224e4585: Waiting for close lock at 1731528258523Running coprocessor pre-close hooks at 1731528258523Disabling compacts and flushes for region at 1731528258523Disabling writes for close at 1731528258523Writing region close event to WAL at 1731528258524 (+1 ms)Running coprocessor post-close hooks at 1731528258531 (+7 ms)Closed at 1731528258531 2024-11-13T20:04:18,534 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 12ebf52f94b050633db41166224e4585 2024-11-13T20:04:18,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-13T20:04:18,535 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=12ebf52f94b050633db41166224e4585, regionState=CLOSED 2024-11-13T20:04:18,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure 761b60efd5b7969e107760a98d14345f, server=54d0784d1943,33333,1731528161197 in 167 msec 2024-11-13T20:04:18,536 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 761b60efd5b7969e107760a98d14345f 2024-11-13T20:04:18,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:04:18,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=761b60efd5b7969e107760a98d14345f, UNASSIGN in 180 msec 2024-11-13T20:04:18,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-13T20:04:18,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 12ebf52f94b050633db41166224e4585, server=54d0784d1943,39533,1731528161273 in 173 msec 2024-11-13T20:04:18,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=70 2024-11-13T20:04:18,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=12ebf52f94b050633db41166224e4585, UNASSIGN in 184 msec 2024-11-13T20:04:18,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-13T20:04:18,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 189 msec 2024-11-13T20:04:18,545 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528258544"}]},"ts":"1731528258544"} 2024-11-13T20:04:18,546 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-13T20:04:18,546 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-13T20:04:18,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 205 msec 2024-11-13T20:04:18,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T20:04:18,666 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T20:04:18,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-13T20:04:18,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,669 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-13T20:04:18,671 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,676 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-13T20:04:18,677 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f 2024-11-13T20:04:18,680 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/recovered.edits] 2024-11-13T20:04:18,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T20:04:18,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T20:04:18,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T20:04:18,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T20:04:18,687 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/cf/1914b3cc51a34b418c62c050249453df 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 
2024-11-13T20:04:18,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:18,690 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585 2024-11-13T20:04:18,693 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f/recovered.edits/9.seqid 2024-11-13T20:04:18,706 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/761b60efd5b7969e107760a98d14345f 2024-11-13T20:04:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-13T20:04:18,708 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/recovered.edits] 2024-11-13T20:04:18,717 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/cf/63f30c7091e44bcf81e6beec5967e997 2024-11-13T20:04:18,721 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585/recovered.edits/9.seqid 2024-11-13T20:04:18,722 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemState/12ebf52f94b050633db41166224e4585 2024-11-13T20:04:18,722 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-13T20:04:18,723 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-13T20:04:18,724 DEBUG [PEWorker-1 
{}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-13T20:04:18,729 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113bfbe667eb39e4abf8da0dd6e35d49a1f_12ebf52f94b050633db41166224e4585 2024-11-13T20:04:18,732 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024111399212b70433b4992b7d1cb1c78495fe4_761b60efd5b7969e107760a98d14345f 2024-11-13T20:04:18,733 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-13T20:04:18,737 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,741 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-13T20:04:18,747 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-13T20:04:18,749 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,749 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
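The DeleteTableProcedure, HFileArchiver and SnapshotManager entries around this point correspond to dropping the already-disabled table and then deleting its two snapshots by name. A minimal sketch of the equivalent Admin calls:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableAndSnapshotsExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.deleteTable(table);  // the table must already be disabled
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }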
2024-11-13T20:04:18,749 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528258749"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:18,749 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528258749"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:18,752 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:04:18,753 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 761b60efd5b7969e107760a98d14345f, NAME => 'testtb-testExportFileSystemState,,1731528237227.761b60efd5b7969e107760a98d14345f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 12ebf52f94b050633db41166224e4585, NAME => 'testtb-testExportFileSystemState,1,1731528237227.12ebf52f94b050633db41166224e4585.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:04:18,753 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-13T20:04:18,754 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528258753"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:18,758 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-13T20:04:18,760 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T20:04:18,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 93 msec 2024-11-13T20:04:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-13T20:04:18,816 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-13T20:04:18,816 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T20:04:18,828 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-13T20:04:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-13T20:04:18,834 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-13T20:04:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-13T20:04:18,871 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=775 (was 772) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:60964 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2815 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:41559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:60372 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 31568) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:43371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:58918 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1678725344_1 at /127.0.0.1:60346 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35263 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=778 (was 781), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=951 (was 985), ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=429 (was 989) 2024-11-13T20:04:18,871 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=775 is superior to 500 2024-11-13T20:04:18,904 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=775, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=951, ProcessCount=22, AvailableMemoryMB=426 2024-11-13T20:04:18,904 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=775 is superior to 500 2024-11-13T20:04:18,906 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:04:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:18,910 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:04:18,910 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-13T20:04:18,911 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:04:18,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T20:04:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742024_1200 (size=440) 2024-11-13T20:04:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742024_1200 (size=440) 2024-11-13T20:04:18,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742024_1200 (size=440) 2024-11-13T20:04:18,931 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 67f30c74b742d8a96fd8d8102693bd6e, NAME => 'testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => 
'0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:18,931 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 15be569b044ae47e8bf0566391821de3, NAME => 'testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:18,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742025_1201 (size=65) 2024-11-13T20:04:18,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742025_1201 (size=65) 2024-11-13T20:04:18,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742025_1201 (size=65) 2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 15be569b044ae47e8bf0566391821de3, disabling compactions & flushes 2024-11-13T20:04:18,948 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. after waiting 0 ms 2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:18,948 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 
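The CreateTableProcedure entries above show the master writing the filesystem layout for 'testtb-testConsecutiveExports' with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at row key '1'. The test drives this through its own utilities, but for reference a roughly equivalent request through the public HBase 2.x client API would look like the sketch below; the class name and connection setup are illustrative, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family 'cf' with MOB enabled and threshold 0, mirroring the descriptor
      // logged by HMaster (threshold 0 means every cell value is treated as a MOB).
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1);
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(cf.build());
      // One split key "1" yields the two regions seen in the log:
      // STARTKEY '' .. '1' and '1' .. ''.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}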
2024-11-13T20:04:18,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 15be569b044ae47e8bf0566391821de3: Waiting for close lock at 1731528258948Disabling compacts and flushes for region at 1731528258948Disabling writes for close at 1731528258948Writing region close event to WAL at 1731528258948Closed at 1731528258948 2024-11-13T20:04:18,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742026_1202 (size=65) 2024-11-13T20:04:18,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742026_1202 (size=65) 2024-11-13T20:04:18,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742026_1202 (size=65) 2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 67f30c74b742d8a96fd8d8102693bd6e, disabling compactions & flushes 2024-11-13T20:04:18,956 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. after waiting 0 ms 2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:18,956 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
2024-11-13T20:04:18,956 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 67f30c74b742d8a96fd8d8102693bd6e: Waiting for close lock at 1731528258956Disabling compacts and flushes for region at 1731528258956Disabling writes for close at 1731528258956Writing region close event to WAL at 1731528258956Closed at 1731528258956 2024-11-13T20:04:18,958 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:04:18,958 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528258958"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528258958"}]},"ts":"1731528258958"} 2024-11-13T20:04:18,958 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528258958"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528258958"}]},"ts":"1731528258958"} 2024-11-13T20:04:18,961 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:04:18,962 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:04:18,962 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528258962"}]},"ts":"1731528258962"} 2024-11-13T20:04:18,965 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-13T20:04:18,965 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:04:18,968 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:04:18,968 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:04:18,968 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:04:18,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:04:18,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:04:18,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:04:18,969 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:04:18,969 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:04:18,969 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:04:18,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:04:18,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, ASSIGN}] 2024-11-13T20:04:18,971 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, ASSIGN 2024-11-13T20:04:18,971 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, ASSIGN 2024-11-13T20:04:18,973 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:04:18,974 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:04:19,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T20:04:19,124 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
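The two TransitRegionStateProcedure entries show the balancer placing the new regions round-robin across the region servers (forceNewPlan=false, retain=false). If you needed to confirm the resulting placement from a client, a RegionLocator lookup such as the hedged sketch below would do it; the surrounding Connection is assumed.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementSketch {
  // Print encoded region name -> hosting region server for the new table.
  static void printPlacement(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}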
2024-11-13T20:04:19,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=15be569b044ae47e8bf0566391821de3, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:04:19,124 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=67f30c74b742d8a96fd8d8102693bd6e, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:04:19,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, ASSIGN because future has completed 2024-11-13T20:04:19,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:04:19,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, ASSIGN because future has completed 2024-11-13T20:04:19,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:04:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T20:04:19,284 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:19,285 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 15be569b044ae47e8bf0566391821de3, NAME => 'testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:04:19,285 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. service=AccessControlService 2024-11-13T20:04:19,285 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
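The repeated 'Checking to see if procedure is done pid=76' entries are the client polling the master for completion of the create-table procedure. On the client side that polling is hidden behind a Future; a minimal sketch of the asynchronous form of the call, assuming the descriptor and split keys from the earlier sketch, is shown below.

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AsyncCreateSketch {
  // Submit the create-table procedure and block until the master reports it done;
  // the blocking get() roughly corresponds to the "is procedure done" polling above.
  static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splits) throws Exception {
    Future<Void> pending = admin.createTableAsync(desc, splits);
    pending.get();
  }
}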
2024-11-13T20:04:19,286 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,286 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:19,286 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,286 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,289 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:19,289 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 67f30c74b742d8a96fd8d8102693bd6e, NAME => 'testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:04:19,289 INFO [StoreOpener-15be569b044ae47e8bf0566391821de3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,290 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. service=AccessControlService 2024-11-13T20:04:19,290 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
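The StoreOpener entry above spells out the effective CacheConfig for family 'cf' (cacheDataOnRead=true, all write-side caching off, prefetchOnOpen=false). These are per-family settings; a hedged sketch of setting them explicitly on a descriptor, should a test or operator want different caching behaviour, is:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CacheConfigSketch {
  // Mirror the flags printed in the CacheConfig log line; the values are illustrative.
  static ColumnFamilyDescriptor cachingFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setBlockCacheEnabled(true)      // cacheDataOnRead=true
        .setCacheDataOnWrite(false)
        .setCacheIndexesOnWrite(false)
        .setCacheBloomsOnWrite(false)
        .setEvictBlocksOnClose(false)
        .setPrefetchBlocksOnOpen(false)
        .build();
  }
}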
2024-11-13T20:04:19,290 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,290 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:19,290 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,290 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,291 INFO [StoreOpener-15be569b044ae47e8bf0566391821de3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15be569b044ae47e8bf0566391821de3 columnFamilyName cf 2024-11-13T20:04:19,292 INFO [StoreOpener-67f30c74b742d8a96fd8d8102693bd6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,293 DEBUG [StoreOpener-15be569b044ae47e8bf0566391821de3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:19,294 INFO [StoreOpener-15be569b044ae47e8bf0566391821de3-1 {}] regionserver.HStore(327): Store=15be569b044ae47e8bf0566391821de3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:04:19,295 INFO [StoreOpener-67f30c74b742d8a96fd8d8102693bd6e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67f30c74b742d8a96fd8d8102693bd6e columnFamilyName cf 2024-11-13T20:04:19,295 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,296 DEBUG [StoreOpener-67f30c74b742d8a96fd8d8102693bd6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:19,296 INFO [StoreOpener-67f30c74b742d8a96fd8d8102693bd6e-1 {}] regionserver.HStore(327): Store=67f30c74b742d8a96fd8d8102693bd6e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:04:19,297 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,297 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,297 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,298 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,298 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,298 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,300 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,300 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,300 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,300 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,303 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,304 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 
{event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:04:19,304 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 15be569b044ae47e8bf0566391821de3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69011333, jitterRate=0.028348997235298157}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:04:19,304 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,305 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 15be569b044ae47e8bf0566391821de3: Running coprocessor pre-open hook at 1731528259286Writing region info on filesystem at 1731528259286Initializing all the Stores at 1731528259287 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528259287Cleaning up temporary data from old regions at 1731528259298 (+11 ms)Running coprocessor post-open hooks at 1731528259304 (+6 ms)Region opened successfully at 1731528259305 (+1 ms) 2024-11-13T20:04:19,306 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3., pid=79, masterSystemTime=1731528259280 2024-11-13T20:04:19,307 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:04:19,308 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 67f30c74b742d8a96fd8d8102693bd6e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63189618, jitterRate=-0.05840131640434265}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:04:19,308 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,309 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 67f30c74b742d8a96fd8d8102693bd6e: Running coprocessor pre-open hook at 1731528259290Writing region info on filesystem at 1731528259290Initializing all the Stores at 1731528259291 (+1 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528259291Cleaning up temporary data from old regions at 1731528259300 (+9 ms)Running coprocessor post-open hooks at 1731528259308 (+8 ms)Region opened successfully at 1731528259308 2024-11-13T20:04:19,310 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:19,310 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:19,310 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e., pid=80, masterSystemTime=1731528259282 2024-11-13T20:04:19,310 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=15be569b044ae47e8bf0566391821de3, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:04:19,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:04:19,313 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:19,313 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
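At this point both regions report 'Region opened successfully' and the post-open deploy tasks finish; the entries that follow flip the table to ENABLED in hbase:meta. A client that needs to wait for that state typically just polls Admin.isTableAvailable; a minimal sketch follows, with the timeout and back-off being arbitrary choices for the example.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTableSketch {
  // Block until the master reports every region of the table online, or time out.
  static void waitForTable(Admin admin, String name, long timeoutMs) throws Exception {
    TableName table = TableName.valueOf(name);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + name + " not available within " + timeoutMs + " ms");
      }
      Thread.sleep(200); // arbitrary polling interval
    }
  }
}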
2024-11-13T20:04:19,313 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=67f30c74b742d8a96fd8d8102693bd6e, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:04:19,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:04:19,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=78 2024-11-13T20:04:19,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273 in 186 msec 2024-11-13T20:04:19,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=77 2024-11-13T20:04:19,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, ASSIGN in 349 msec 2024-11-13T20:04:19,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987 in 188 msec 2024-11-13T20:04:19,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-13T20:04:19,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, ASSIGN in 350 msec 2024-11-13T20:04:19,322 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:04:19,322 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528259322"}]},"ts":"1731528259322"} 2024-11-13T20:04:19,324 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-13T20:04:19,325 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:04:19,325 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-13T20:04:19,330 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T20:04:19,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:19,337 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:19,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:19,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:19,343 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:19,343 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:19,343 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:19,343 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:19,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 436 msec 2024-11-13T20:04:19,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T20:04:19,536 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T20:04:19,536 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,539 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-13T20:04:19,539 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
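The PermissionStorage and ZKPermissionWatcher entries record the table ACL being written to hbase:acl (owner 'jenkins' with RWXCA) and the change fanning out to the master and each region server via the /hbase/acl znode. Granting an equivalent permission by hand is normally done through AccessControlClient; the sketch below is illustrative and assumes the AccessController coprocessor is installed, as it is in this secure-snapshot test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // Grant R/W/X/C/A (the "RWXCA" seen in the log) on the whole table to user "jenkins".
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testConsecutiveExports"),
        "jenkins",
        null, // all column families
        null, // all qualifiers
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}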
2024-11-13T20:04:19,539 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:04:19,542 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,547 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,554 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,557 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T20:04:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528259557 (current time:1731528259557). 2024-11-13T20:04:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:04:19,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-13T20:04:19,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:04:19,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d809cf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:04:19,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:04:19,560 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:04:19,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:04:19,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:04:19,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15f2bb5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
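The master has now received a FLUSH-type snapshot request named 'emptySnaptb0-testConsecutiveExports' and is validating it (TTL, version, owner). The client-side call that produces such a request is Admin.snapshot; a hedged equivalent, not copied from the test itself:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  // Request a flush-type snapshot of the table, matching the logged request.
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"),
        SnapshotType.FLUSH);
  }
}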
2024-11-13T20:04:19,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:04:19,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:04:19,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,562 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:04:19,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@199d02fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:04:19,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:04:19,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,566 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:04:19,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:04:19,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,567 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:04:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7365232a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:04:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:04:19,569 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:04:19,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:04:19,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:04:19,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e14af9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:04:19,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:04:19,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,570 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:04:19,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28327525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:04:19,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:04:19,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,574 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:04:19,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,577 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51456, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,578 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:04:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:04:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,578 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:04:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T20:04:19,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
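The call stack and acl read above show SnapshotDescriptionUtils.writeAclToSnapshotDescription copying the table permission entry [jenkins: RWXCA] from hbase:acl into the snapshot description. For orientation only, a minimal sketch of how such a table-level grant is normally issued through the public AccessControlClient API follows; the connection bootstrap and the literal user/table names (copied from the log) are illustrative, and this code is not part of the recorded test run.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    // Assumed connection bootstrap; the cluster in this log is a MiniCluster with its own config.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant READ/WRITE/EXEC/CREATE/ADMIN ("RWXCA" in the acl line above) on the whole table
      // (null family and qualifier mean the grant applies to every column).
      AccessControlClient.grant(conn, TableName.valueOf("testtb-testConsecutiveExports"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}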
2024-11-13T20:04:19,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T20:04:19,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-13T20:04:19,581 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:04:19,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T20:04:19,582 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:04:19,585 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:04:19,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742027_1203 (size=161) 2024-11-13T20:04:19,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742027_1203 (size=161) 2024-11-13T20:04:19,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742027_1203 (size=161) 2024-11-13T20:04:19,595 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:04:19,595 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3}] 2024-11-13T20:04:19,596 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,597 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T20:04:19,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-13T20:04:19,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:19,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for 67f30c74b742d8a96fd8d8102693bd6e: 2024-11-13T20:04:19,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. for emptySnaptb0-testConsecutiveExports completed. 2024-11-13T20:04:19,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 15be569b044ae47e8bf0566391821de3: 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. for emptySnaptb0-testConsecutiveExports completed. 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:04:19,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:04:19,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742028_1204 (size=68) 2024-11-13T20:04:19,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742028_1204 (size=68) 2024-11-13T20:04:19,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742028_1204 (size=68) 2024-11-13T20:04:19,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:19,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-13T20:04:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-13T20:04:19,759 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,759 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:19,761 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e in 165 msec 2024-11-13T20:04:19,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742029_1205 (size=68) 2024-11-13T20:04:19,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742029_1205 (size=68) 2024-11-13T20:04:19,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742029_1205 (size=68) 2024-11-13T20:04:19,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 
2024-11-13T20:04:19,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-13T20:04:19,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-13T20:04:19,773 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,773 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=81 2024-11-13T20:04:19,776 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:04:19,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 in 179 msec 2024-11-13T20:04:19,777 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:04:19,778 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:04:19,778 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:04:19,778 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:19,778 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:04:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742030_1206 (size=60) 2024-11-13T20:04:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742030_1206 (size=60) 2024-11-13T20:04:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742030_1206 (size=60) 2024-11-13T20:04:19,786 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:04:19,786 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:19,787 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:19,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742031_1207 (size=641) 2024-11-13T20:04:19,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742031_1207 (size=641) 2024-11-13T20:04:19,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742031_1207 (size=641) 2024-11-13T20:04:19,801 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:04:19,806 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:04:19,807 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:19,809 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:04:19,809 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-13T20:04:19,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 230 msec 2024-11-13T20:04:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T20:04:19,895 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T20:04:19,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:04:19,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:04:19,905 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-13T20:04:19,908 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
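The records above cover a complete SnapshotProcedure run (pid=81) for the FLUSH snapshot emptySnaptb0-testConsecutiveExports, from SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION, and the snaptb0-testConsecutiveExports request that follows repeats the same sequence. As a point of reference only, a minimal client-side sketch that issues such a request through the public Admin API is shown below; the connection bootstrap is an assumption and the call itself is not part of this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot flushes each region's memstore before capturing store-file
      // references, matching the "type=FLUSH" requests in the log. The synchronous call
      // polls the master until the procedure completes, which is what produces the repeated
      // "Checking to see if procedure is done" records on the master side.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH));
    }
  }
}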
2024-11-13T20:04:19,908 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:04:19,910 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,915 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,920 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T20:04:19,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T20:04:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528259923 (current time:1731528259923). 2024-11-13T20:04:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:04:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-13T20:04:19,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:04:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b578cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:04:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:04:19,925 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:04:19,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:04:19,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:04:19,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a8f5a8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-13T20:04:19,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:04:19,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:04:19,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,926 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:04:19,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@563a07c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:04:19,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:04:19,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,929 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,930 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,931 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34862531, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:04:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:04:19,932 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:04:19,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:04:19,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:04:19,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@682555b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:04:19,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:04:19,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,933 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:04:19,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aaed398, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:04:19,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:04:19,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:04:19,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,936 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55514, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:04:19,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:04:19,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:04:19,940 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:04:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:04:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:04:19,940 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:04:19,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T20:04:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-13T20:04:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T20:04:19,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-13T20:04:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T20:04:19,943 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:04:19,944 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:04:19,946 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:04:19,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742032_1208 (size=156) 2024-11-13T20:04:19,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742032_1208 (size=156) 2024-11-13T20:04:19,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742032_1208 (size=156) 2024-11-13T20:04:19,953 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:04:19,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3}] 2024-11-13T20:04:19,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:19,954 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T20:04:20,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-13T20:04:20,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-13T20:04:20,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:20,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:20,107 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing 67f30c74b742d8a96fd8d8102693bd6e 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-13T20:04:20,107 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 15be569b044ae47e8bf0566391821de3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-13T20:04:20,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 is 71, key is 17966d41e9c1464128a5806dbe2b8963/cf:q/1731528259903/Put/seqid=0 2024-11-13T20:04:20,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e is 71, key is 09e2605a6c9636bf74e9740b65f94651/cf:q/1731528259901/Put/seqid=0 2024-11-13T20:04:20,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742033_1209 (size=8102) 2024-11-13T20:04:20,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742033_1209 (size=8102) 2024-11-13T20:04:20,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742033_1209 (size=8102) 2024-11-13T20:04:20,135 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:20,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742034_1210 (size=5172) 2024-11-13T20:04:20,140 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742034_1210 (size=5172) 2024-11-13T20:04:20,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742034_1210 (size=5172) 2024-11-13T20:04:20,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:20,141 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:20,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/.tmp/cf/5babe1d51b33474cbb96c7746ac7096b, store: [table=testtb-testConsecutiveExports family=cf region=15be569b044ae47e8bf0566391821de3] 2024-11-13T20:04:20,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/.tmp/cf/5babe1d51b33474cbb96c7746ac7096b is 206, key is 159c4661d3b6388c2e01eaa00da375865/cf:q/1731528259903/Put/seqid=0 2024-11-13T20:04:20,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:20,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/.tmp/cf/a1727c432ec741a2b9421dcdbb689f62, store: [table=testtb-testConsecutiveExports family=cf region=67f30c74b742d8a96fd8d8102693bd6e] 2024-11-13T20:04:20,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/.tmp/cf/a1727c432ec741a2b9421dcdbb689f62 is 206, key is 06dd6f56aec01e0d9111683ec9d2579bf/cf:q/1731528259901/Put/seqid=0 2024-11-13T20:04:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742035_1211 (size=14653) 2024-11-13T20:04:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742035_1211 (size=14653) 2024-11-13T20:04:20,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742035_1211 (size=14653) 2024-11-13T20:04:20,165 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/.tmp/cf/5babe1d51b33474cbb96c7746ac7096b 2024-11-13T20:04:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742036_1212 (size=6108) 2024-11-13T20:04:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742036_1212 (size=6108) 2024-11-13T20:04:20,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742036_1212 (size=6108) 2024-11-13T20:04:20,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/.tmp/cf/a1727c432ec741a2b9421dcdbb689f62 2024-11-13T20:04:20,173 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/.tmp/cf/5babe1d51b33474cbb96c7746ac7096b as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b 2024-11-13T20:04:20,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/.tmp/cf/a1727c432ec741a2b9421dcdbb689f62 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62 2024-11-13T20:04:20,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b, entries=46, sequenceid=6, filesize=14.3 K 2024-11-13T20:04:20,180 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 15be569b044ae47e8bf0566391821de3 in 73ms, sequenceid=6, compaction requested=false 2024-11-13T20:04:20,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-13T20:04:20,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62, entries=4, sequenceid=6, filesize=6.0 K 2024-11-13T20:04:20,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 67f30c74b742d8a96fd8d8102693bd6e in 74ms, sequenceid=6, compaction requested=false 2024-11-13T20:04:20,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for 67f30c74b742d8a96fd8d8102693bd6e: 2024-11-13T20:04:20,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 15be569b044ae47e8bf0566391821de3: 2024-11-13T20:04:20,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. for snaptb0-testConsecutiveExports completed. 2024-11-13T20:04:20,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. for snaptb0-testConsecutiveExports completed. 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62] hfiles 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62 for snapshot=snaptb0-testConsecutiveExports 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b] hfiles 2024-11-13T20:04:20,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b for snapshot=snaptb0-testConsecutiveExports 2024-11-13T20:04:20,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742037_1213 (size=107) 2024-11-13T20:04:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742037_1213 (size=107) 2024-11-13T20:04:20,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742037_1213 (size=107) 2024-11-13T20:04:20,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
2024-11-13T20:04:20,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-13T20:04:20,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-13T20:04:20,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:20,197 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:20,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e in 244 msec 2024-11-13T20:04:20,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742038_1214 (size=107) 2024-11-13T20:04:20,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742038_1214 (size=107) 2024-11-13T20:04:20,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742038_1214 (size=107) 2024-11-13T20:04:20,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 
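The DefaultMobStoreFlusher and HMobStore records above indicate that column family 'cf' of testtb-testConsecutiveExports is MOB-enabled, which is why the snapshot manifest also references files under the mobdir tree. A hedged sketch of how such a family is declared with the public descriptor builders follows; the threshold value is an assumption and is not taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // cells above the threshold are written as MOB files under mobdir/
            .setMobThreshold(3L)   // assumed threshold in bytes; the test's actual value is not in this log
            .build())
        .build();
  }
}

Creating a table from a descriptor like this (for example with Admin.createTable) would produce the same kind of MOB flush and mobdir reference activity recorded above.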
2024-11-13T20:04:20,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-13T20:04:20,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-13T20:04:20,203 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:20,203 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:20,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-13T20:04:20,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 15be569b044ae47e8bf0566391821de3 in 251 msec 2024-11-13T20:04:20,206 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:04:20,206 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:04:20,207 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
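The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries that follow appear because the test table has a MOB-enabled column family, so the manifest must also reference the MOB hfiles under /mobdir. A hedged sketch of how such a table might be declared is below; the family name 'cf' mirrors the log, the threshold value is assumed, and the real test setup is not part of this excerpt.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTable {
  // 'admin' is assumed to be an open Admin handle (see the Connection sketch earlier).
  public static void createMobTable(Admin admin) throws IOException {
    admin.createTable(
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)        // MOB cells are written as separate hfiles under /mobdir
                    .setMobThreshold(102400L)   // assumed threshold (100 KB); the test's value is not shown here
                    .build())
            .build());
  }
}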
2024-11-13T20:04:20,207 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:04:20,207 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:20,209 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e] hfiles 2024-11-13T20:04:20,209 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:20,209 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:20,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742039_1215 (size=291) 2024-11-13T20:04:20,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742039_1215 (size=291) 2024-11-13T20:04:20,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742039_1215 (size=291) 2024-11-13T20:04:20,218 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:04:20,218 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-13T20:04:20,219 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T20:04:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742040_1216 (size=951) 2024-11-13T20:04:20,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742040_1216 (size=951) 2024-11-13T20:04:20,236 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742040_1216 (size=951) 2024-11-13T20:04:20,243 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:04:20,251 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:04:20,252 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T20:04:20,254 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:04:20,254 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-13T20:04:20,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 313 msec 2024-11-13T20:04:20,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T20:04:20,266 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T20:04:20,266 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266 2024-11-13T20:04:20,266 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:20,297 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:20,297 
DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@201695cb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T20:04:20,299 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:04:20,303 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T20:04:20,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:20,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:20,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:20,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-13T20:04:20,608 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-13T20:04:20,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-13T20:04:21,477 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-15554241532399266720.jar 2024-11-13T20:04:21,478 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,478 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-14330173627442034424.jar 2024-11-13T20:04:21,548 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,549 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,549 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:21,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:04:21,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:04:21,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:04:21,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:04:21,552 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:04:21,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:04:21,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:04:21,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:04:21,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:04:21,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:04:21,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:04:21,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:21,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:21,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:21,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:21,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:21,556 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:21,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:21,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742041_1217 (size=131440) 2024-11-13T20:04:21,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742041_1217 (size=131440) 2024-11-13T20:04:21,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742041_1217 (size=131440) 2024-11-13T20:04:21,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T20:04:21,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T20:04:21,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T20:04:21,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T20:04:21,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T20:04:21,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T20:04:21,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742044_1220 (size=903730) 2024-11-13T20:04:21,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742044_1220 (size=903730) 2024-11-13T20:04:21,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742044_1220 (size=903730) 2024-11-13T20:04:21,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T20:04:21,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T20:04:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T20:04:21,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T20:04:21,746 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T20:04:21,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T20:04:21,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742047_1223 (size=77835) 2024-11-13T20:04:21,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742047_1223 (size=77835) 2024-11-13T20:04:21,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742047_1223 (size=77835) 2024-11-13T20:04:21,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742048_1224 (size=30949) 2024-11-13T20:04:21,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742048_1224 (size=30949) 2024-11-13T20:04:21,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742048_1224 (size=30949) 2024-11-13T20:04:21,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T20:04:21,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T20:04:21,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T20:04:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T20:04:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T20:04:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T20:04:21,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742051_1227 (size=6424542) 2024-11-13T20:04:21,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742051_1227 (size=6424542) 2024-11-13T20:04:21,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742051_1227 (size=6424542) 2024-11-13T20:04:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742052_1228 (size=232957) 2024-11-13T20:04:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742052_1228 (size=232957) 2024-11-13T20:04:21,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742052_1228 (size=232957) 2024-11-13T20:04:21,893 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742053_1229 (size=127628) 2024-11-13T20:04:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742053_1229 (size=127628) 2024-11-13T20:04:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742053_1229 (size=127628) 2024-11-13T20:04:22,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742054_1230 (size=20406) 2024-11-13T20:04:22,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742054_1230 (size=20406) 2024-11-13T20:04:22,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742054_1230 (size=20406) 2024-11-13T20:04:22,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T20:04:22,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T20:04:22,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T20:04:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742056_1232 (size=217634) 2024-11-13T20:04:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742056_1232 (size=217634) 2024-11-13T20:04:22,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742056_1232 (size=217634) 2024-11-13T20:04:22,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T20:04:22,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T20:04:22,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T20:04:22,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742058_1234 (size=322274) 2024-11-13T20:04:22,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742058_1234 (size=322274) 2024-11-13T20:04:22,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742058_1234 (size=322274) 2024-11-13T20:04:22,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742059_1235 (size=503880) 2024-11-13T20:04:22,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742059_1235 (size=503880) 2024-11-13T20:04:22,431 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742059_1235 (size=503880) 2024-11-13T20:04:22,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742060_1236 (size=29229) 2024-11-13T20:04:22,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742060_1236 (size=29229) 2024-11-13T20:04:22,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742060_1236 (size=29229) 2024-11-13T20:04:22,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742061_1237 (size=24096) 2024-11-13T20:04:22,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742061_1237 (size=24096) 2024-11-13T20:04:22,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742061_1237 (size=24096) 2024-11-13T20:04:22,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742062_1238 (size=440392) 2024-11-13T20:04:22,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742062_1238 (size=440392) 2024-11-13T20:04:22,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742062_1238 (size=440392) 2024-11-13T20:04:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742063_1239 (size=111872) 2024-11-13T20:04:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742063_1239 (size=111872) 2024-11-13T20:04:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742063_1239 (size=111872) 2024-11-13T20:04:22,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742064_1240 (size=45609) 2024-11-13T20:04:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742064_1240 (size=45609) 2024-11-13T20:04:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742064_1240 (size=45609) 2024-11-13T20:04:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742065_1241 (size=136454) 2024-11-13T20:04:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742065_1241 (size=136454) 2024-11-13T20:04:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742065_1241 (size=136454) 2024-11-13T20:04:22,499 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
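The "No job jar file set" warning above is the standard MapReduce hint that neither Job#setJar nor Job#setJarByClass was called; the preceding TableMapReduceUtil(972) lines show the HBase and third-party dependency jars being resolved and shipped to the cluster instead. A minimal sketch of the usual pattern, using a hypothetical driver class that is not part of the test, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ExportDriver {                        // hypothetical driver class, not from the test
  public static Job buildJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-example");
    job.setJarByClass(ExportDriver.class);         // avoids the "No job jar file set" warning
    // Ships the HBase/ZooKeeper/OpenTelemetry/... jars listed in the log to the distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}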
2024-11-13T20:04:22,501 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-13T20:04:22,503 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T20:04:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742066_1242 (size=714) 2024-11-13T20:04:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742066_1242 (size=714) 2024-11-13T20:04:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742066_1242 (size=714) 2024-11-13T20:04:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742067_1243 (size=15) 2024-11-13T20:04:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742067_1243 (size=15) 2024-11-13T20:04:22,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742067_1243 (size=15) 2024-11-13T20:04:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742068_1244 (size=303777) 2024-11-13T20:04:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742068_1244 (size=303777) 2024-11-13T20:04:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742068_1244 (size=303777) 2024-11-13T20:04:23,360 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:04:23,360 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:04:23,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0003_000001 (auth:SIMPLE) from 127.0.0.1:53110 2024-11-13T20:04:23,377 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000001/launch_container.sh] 2024-11-13T20:04:23,378 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000001/container_tokens] 2024-11-13T20:04:23,378 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0003/container_1731528172678_0003_01_000001/sysfs] 2024-11-13T20:04:23,577 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0004_000001 (auth:SIMPLE) from 127.0.0.1:45846 2024-11-13T20:04:24,059 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:04:30,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0004_000001 (auth:SIMPLE) from 127.0.0.1:42708 2024-11-13T20:04:30,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742069_1245 (size=349427) 2024-11-13T20:04:30,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742069_1245 (size=349427) 2024-11-13T20:04:30,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742069_1245 (size=349427) 2024-11-13T20:04:32,654 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0004_000001 (auth:SIMPLE) from 127.0.0.1:51206 2024-11-13T20:04:36,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742070_1246 (size=17451) 2024-11-13T20:04:36,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742070_1246 (size=17451) 2024-11-13T20:04:36,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742070_1246 (size=17451) 2024-11-13T20:04:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742071_1247 (size=462) 2024-11-13T20:04:36,793 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742071_1247 (size=462) 2024-11-13T20:04:36,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742071_1247 (size=462) 2024-11-13T20:04:36,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742072_1248 (size=17451) 2024-11-13T20:04:36,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742072_1248 (size=17451) 2024-11-13T20:04:36,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742072_1248 (size=17451) 2024-11-13T20:04:36,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742073_1249 (size=349427) 2024-11-13T20:04:36,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742073_1249 (size=349427) 2024-11-13T20:04:36,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742073_1249 (size=349427) 2024-11-13T20:04:37,039 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0004_000001 (auth:SIMPLE) from 127.0.0.1:51218 2024-11-13T20:04:38,790 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:04:38,790 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
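The Finalize/Verify lines above are the tail end of one ExportSnapshot run, copying from the HDFS source to the local file:/// target listed earlier. Outside of this test harness the same tool is normally driven through ToolRunner; a hedged sketch is below, with the flag spellings (-snapshot, -copy-to, -mappers) taken from the HBase reference guide rather than from this log, and the destination path assumed for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent CLI: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //   -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export -mappers 1
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",    // assumed destination; the test targets a dir under its workspace
        "-mappers", "1"
    });
    System.exit(rc);
  }
}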
2024-11-13T20:04:38,793 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-13T20:04:38,793 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:04:38,794 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:04:38,794 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T20:04:38,795 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T20:04:38,795 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T20:04:38,796 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@201695cb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T20:04:38,796 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T20:04:38,796 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T20:04:38,797 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:38,844 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:38,844 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@201695cb, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T20:04:38,847 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:04:38,849 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:04:38,858 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T20:04:38,882 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:38,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:38,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-496533848203791342.jar 2024-11-13T20:04:40,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-18036649204025922784.jar 2024-11-13T20:04:40,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,220 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,221 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,221 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,221 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:04:40,222 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:04:40,222 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:04:40,222 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:04:40,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:04:40,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:04:40,224 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:04:40,224 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:04:40,224 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:04:40,225 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:04:40,225 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:04:40,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:04:40,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:40,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:40,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:40,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:40,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:04:40,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:04:40,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
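At this point the second, back-to-back export of the same snapshot into the same local target is being staged, which is presumably what "testConsecutiveExports" exercises. After each run the test lists the target and expects at least the .snapshotinfo and data.manifest entries seen earlier; a rough equivalent check with the Hadoop FileSystem API, using an assumed local path, is:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckExportedSnapshot {
  public static void main(String[] args) throws Exception {
    // Assumed export target; the test actually uses local-export-<timestamp> under its workspace.
    Path snapDir = new Path("file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = FileSystem.get(URI.create("file:///"), new Configuration());
    for (String name : new String[] {".snapshotinfo", "data.manifest"}) {
      Path p = new Path(snapDir, name);
      System.out.println(p + " exists=" + fs.exists(p));
    }
  }
}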
2024-11-13T20:04:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742074_1250 (size=131440) 2024-11-13T20:04:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742074_1250 (size=131440) 2024-11-13T20:04:40,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742074_1250 (size=131440) 2024-11-13T20:04:40,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T20:04:40,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T20:04:40,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T20:04:40,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742076_1252 (size=1323991) 2024-11-13T20:04:40,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742076_1252 (size=1323991) 2024-11-13T20:04:40,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742076_1252 (size=1323991) 2024-11-13T20:04:40,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742077_1253 (size=903730) 2024-11-13T20:04:40,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742077_1253 (size=903730) 2024-11-13T20:04:40,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742077_1253 (size=903730) 2024-11-13T20:04:40,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742078_1254 (size=8360083) 2024-11-13T20:04:40,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742078_1254 (size=8360083) 2024-11-13T20:04:40,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742078_1254 (size=8360083) 2024-11-13T20:04:40,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742079_1255 (size=1877034) 2024-11-13T20:04:40,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742079_1255 (size=1877034) 2024-11-13T20:04:40,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742079_1255 (size=1877034) 2024-11-13T20:04:40,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742080_1256 (size=77835) 2024-11-13T20:04:40,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to 
blk_1073742080_1256 (size=77835) 2024-11-13T20:04:40,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742080_1256 (size=77835) 2024-11-13T20:04:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742081_1257 (size=30949) 2024-11-13T20:04:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742081_1257 (size=30949) 2024-11-13T20:04:40,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742081_1257 (size=30949) 2024-11-13T20:04:40,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742082_1258 (size=1597327) 2024-11-13T20:04:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742082_1258 (size=1597327) 2024-11-13T20:04:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742082_1258 (size=1597327) 2024-11-13T20:04:40,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742083_1259 (size=4695811) 2024-11-13T20:04:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742083_1259 (size=4695811) 2024-11-13T20:04:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742083_1259 (size=4695811) 2024-11-13T20:04:40,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742084_1260 (size=232957) 2024-11-13T20:04:40,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742084_1260 (size=232957) 2024-11-13T20:04:40,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742084_1260 (size=232957) 2024-11-13T20:04:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742085_1261 (size=127628) 2024-11-13T20:04:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742085_1261 (size=127628) 2024-11-13T20:04:40,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742085_1261 (size=127628) 2024-11-13T20:04:40,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742086_1262 (size=20406) 2024-11-13T20:04:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742086_1262 (size=20406) 2024-11-13T20:04:40,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742086_1262 (size=20406) 2024-11-13T20:04:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added 
to blk_1073742087_1263 (size=5175431) 2024-11-13T20:04:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742087_1263 (size=5175431) 2024-11-13T20:04:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742087_1263 (size=5175431) 2024-11-13T20:04:40,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742088_1264 (size=217634) 2024-11-13T20:04:40,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742088_1264 (size=217634) 2024-11-13T20:04:40,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742088_1264 (size=217634) 2024-11-13T20:04:40,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742089_1265 (size=1832290) 2024-11-13T20:04:40,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742089_1265 (size=1832290) 2024-11-13T20:04:40,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742089_1265 (size=1832290) 2024-11-13T20:04:40,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742090_1266 (size=322274) 2024-11-13T20:04:40,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742090_1266 (size=322274) 2024-11-13T20:04:40,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742090_1266 (size=322274) 2024-11-13T20:04:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742091_1267 (size=503880) 2024-11-13T20:04:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742091_1267 (size=503880) 2024-11-13T20:04:40,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742091_1267 (size=503880) 2024-11-13T20:04:40,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742092_1268 (size=6424542) 2024-11-13T20:04:40,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742092_1268 (size=6424542) 2024-11-13T20:04:40,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742092_1268 (size=6424542) 2024-11-13T20:04:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742093_1269 (size=440392) 2024-11-13T20:04:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742093_1269 (size=440392) 2024-11-13T20:04:40,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38925 is added to blk_1073742093_1269 (size=440392) 2024-11-13T20:04:40,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742094_1270 (size=29229) 2024-11-13T20:04:40,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742094_1270 (size=29229) 2024-11-13T20:04:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742094_1270 (size=29229) 2024-11-13T20:04:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742095_1271 (size=24096) 2024-11-13T20:04:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742095_1271 (size=24096) 2024-11-13T20:04:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742095_1271 (size=24096) 2024-11-13T20:04:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742096_1272 (size=111872) 2024-11-13T20:04:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742096_1272 (size=111872) 2024-11-13T20:04:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742096_1272 (size=111872) 2024-11-13T20:04:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742097_1273 (size=45609) 2024-11-13T20:04:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742097_1273 (size=45609) 2024-11-13T20:04:40,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742097_1273 (size=45609) 2024-11-13T20:04:40,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742098_1274 (size=136454) 2024-11-13T20:04:40,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742098_1274 (size=136454) 2024-11-13T20:04:40,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742098_1274 (size=136454) 2024-11-13T20:04:40,774 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
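[Editor's note] The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") usually means the submitting code never pointed the job at its own jar. A minimal, self-contained sketch of the usual fix follows; the class and jar path are assumptions, not the test's actual setup.

// Hedged sketch of avoiding the "No job jar file set" warning logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "job-jar-sketch");
    // Either point at the jar that contains the driver class...
    job.setJarByClass(JobJarExample.class);
    // ...or set an explicit jar path, per the warning's own hint (Job#setJar):
    // job.setJar("/path/to/application.jar");
  }
}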
2024-11-13T20:04:40,776 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-13T20:04:40,779 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T20:04:40,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742099_1275 (size=714) 2024-11-13T20:04:40,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742099_1275 (size=714) 2024-11-13T20:04:40,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742099_1275 (size=714) 2024-11-13T20:04:40,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742100_1276 (size=15) 2024-11-13T20:04:40,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742100_1276 (size=15) 2024-11-13T20:04:40,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742100_1276 (size=15) 2024-11-13T20:04:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742101_1277 (size=303773) 2024-11-13T20:04:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742101_1277 (size=303773) 2024-11-13T20:04:40,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742101_1277 (size=303773) 2024-11-13T20:04:41,910 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_1/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000002/launch_container.sh] 2024-11-13T20:04:41,910 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_1/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000002/container_tokens] 2024-11-13T20:04:41,910 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_1/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000002/sysfs] 2024-11-13T20:04:43,145 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:04:43,145 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:04:43,148 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0004_000001 (auth:SIMPLE) from 127.0.0.1:37548 2024-11-13T20:04:43,163 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000001/launch_container.sh] 2024-11-13T20:04:43,163 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000001/container_tokens] 2024-11-13T20:04:43,163 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0004/container_1731528172678_0004_01_000001/sysfs] 2024-11-13T20:04:43,372 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0005_000001 (auth:SIMPLE) from 127.0.0.1:57468 2024-11-13T20:04:48,352 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 67f30c74b742d8a96fd8d8102693bd6e changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:04:48,352 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 15be569b044ae47e8bf0566391821de3 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:04:50,234 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0005_000001 (auth:SIMPLE) from 127.0.0.1:35720 2024-11-13T20:04:50,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742102_1278 (size=349423) 2024-11-13T20:04:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742102_1278 (size=349423) 2024-11-13T20:04:50,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742102_1278 (size=349423) 2024-11-13T20:04:52,576 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0005_000001 (auth:SIMPLE) from 127.0.0.1:45888 2024-11-13T20:04:56,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742103_1279 (size=16925) 2024-11-13T20:04:56,704 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742103_1279 (size=16925) 2024-11-13T20:04:56,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742103_1279 (size=16925) 2024-11-13T20:04:56,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742104_1280 (size=462) 2024-11-13T20:04:56,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742104_1280 (size=462) 2024-11-13T20:04:56,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742104_1280 (size=462) 2024-11-13T20:04:56,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742105_1281 (size=16925) 2024-11-13T20:04:56,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742105_1281 (size=16925) 2024-11-13T20:04:56,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742105_1281 (size=16925) 2024-11-13T20:04:56,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742106_1282 (size=349423) 2024-11-13T20:04:56,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742106_1282 (size=349423) 2024-11-13T20:04:56,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742106_1282 (size=349423) 2024-11-13T20:04:56,905 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0005_000001 (auth:SIMPLE) from 127.0.0.1:45902 2024-11-13T20:04:58,097 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:04:58,097 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
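[Editor's note] The ExportSnapshot entries here and just below show the export of 'snaptb0-testConsecutiveExports' being finalized and verified. The following is a hedged sketch of how such an export is typically driven through the ExportSnapshot tool; the destination URI is a placeholder, not the test's local-export path.

// Hedged sketch of running an export like the one completing above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool; ToolRunner parses the -snapshot/-copy-to args.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export" // placeholder destination
    });
    System.exit(rc);
  }
}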
2024-11-13T20:04:58,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-13T20:04:58,103 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:04:58,104 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:04:58,104 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T20:04:58,105 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T20:04:58,105 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T20:04:58,105 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@201695cb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T20:04:58,105 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T20:04:58,105 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528260266/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T20:04:58,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-13T20:04:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T20:04:58,137 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528298136"}]},"ts":"1731528298136"} 2024-11-13T20:04:58,139 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-13T20:04:58,140 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-13T20:04:58,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-13T20:04:58,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, UNASSIGN}] 2024-11-13T20:04:58,144 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, UNASSIGN 2024-11-13T20:04:58,144 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, UNASSIGN 2024-11-13T20:04:58,145 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=15be569b044ae47e8bf0566391821de3, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:04:58,146 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=67f30c74b742d8a96fd8d8102693bd6e, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:04:58,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, UNASSIGN because future has completed 2024-11-13T20:04:58,151 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:04:58,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:04:58,153 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=54d0784d1943,36439,1731528160987, table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-13T20:04:58,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, UNASSIGN because future has completed 2024-11-13T20:04:58,154 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:04:58,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:04:58,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T20:04:58,306 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:58,306 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:04:58,306 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 15be569b044ae47e8bf0566391821de3, disabling compactions & flushes 2024-11-13T20:04:58,307 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:58,307 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. after waiting 0 ms 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing 67f30c74b742d8a96fd8d8102693bd6e, disabling compactions & flushes 2024-11-13T20:04:58,307 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 
2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. after waiting 0 ms 2024-11-13T20:04:58,307 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:58,424 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:04:58,425 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:04:58,425 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e. 2024-11-13T20:04:58,425 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for 67f30c74b742d8a96fd8d8102693bd6e: Waiting for close lock at 1731528298307Running coprocessor pre-close hooks at 1731528298307Disabling compacts and flushes for region at 1731528298307Disabling writes for close at 1731528298307Writing region close event to WAL at 1731528298353 (+46 ms)Running coprocessor post-close hooks at 1731528298425 (+72 ms)Closed at 1731528298425 2024-11-13T20:04:58,428 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:04:58,429 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed 67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:58,430 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:04:58,430 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=67f30c74b742d8a96fd8d8102693bd6e, regionState=CLOSED 2024-11-13T20:04:58,430 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3. 
2024-11-13T20:04:58,430 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 15be569b044ae47e8bf0566391821de3: Waiting for close lock at 1731528298306Running coprocessor pre-close hooks at 1731528298306Disabling compacts and flushes for region at 1731528298306Disabling writes for close at 1731528298307 (+1 ms)Writing region close event to WAL at 1731528298330 (+23 ms)Running coprocessor post-close hooks at 1731528298430 (+100 ms)Closed at 1731528298430 2024-11-13T20:04:58,435 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=15be569b044ae47e8bf0566391821de3, regionState=CLOSED 2024-11-13T20:04:58,436 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:58,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:04:58,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=89 2024-11-13T20:04:58,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure 67f30c74b742d8a96fd8d8102693bd6e, server=54d0784d1943,36439,1731528160987 in 285 msec 2024-11-13T20:04:58,443 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=67f30c74b742d8a96fd8d8102693bd6e, UNASSIGN in 299 msec 2024-11-13T20:04:58,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:04:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T20:04:58,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-13T20:04:58,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 15be569b044ae47e8bf0566391821de3, server=54d0784d1943,39533,1731528161273 in 298 msec 2024-11-13T20:04:58,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=88 2024-11-13T20:04:58,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=15be569b044ae47e8bf0566391821de3, UNASSIGN in 320 msec 2024-11-13T20:04:58,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-13T20:04:58,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 340 msec 2024-11-13T20:04:58,498 DEBUG [PEWorker-4 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528298497"}]},"ts":"1731528298497"} 2024-11-13T20:04:58,500 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-13T20:04:58,500 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-13T20:04:58,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 371 msec 2024-11-13T20:04:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T20:04:58,769 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T20:04:58,778 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-13T20:04:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,794 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-13T20:04:58,799 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-13T20:04:58,802 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,806 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T20:04:58,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T20:04:58,806 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T20:04:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:58,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,808 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-11-13T20:04:58,808 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:04:58,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:58,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:58,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T20:04:58,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-13T20:04:58,812 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:58,815 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf, FileablePath, 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/recovered.edits] 2024-11-13T20:04:58,817 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:58,822 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/recovered.edits] 2024-11-13T20:04:58,829 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/cf/a1727c432ec741a2b9421dcdbb689f62 2024-11-13T20:04:58,831 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/cf/5babe1d51b33474cbb96c7746ac7096b 2024-11-13T20:04:58,835 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e/recovered.edits/9.seqid 2024-11-13T20:04:58,836 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:58,837 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3/recovered.edits/9.seqid 2024-11-13T20:04:58,839 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testConsecutiveExports/15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:58,839 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-13T20:04:58,840 DEBUG [PEWorker-5 {}] 
backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-13T20:04:58,840 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-13T20:04:58,846 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024111344d92dfc7adf4ecd8b21d926531050bf_15be569b044ae47e8bf0566391821de3 2024-11-13T20:04:58,850 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411132f9910125ebe4bfab9e8a6312c356150_67f30c74b742d8a96fd8d8102693bd6e 2024-11-13T20:04:58,852 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-13T20:04:58,857 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,861 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-13T20:04:58,865 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-13T20:04:58,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
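[Editor's note] The procedure entries surrounding this point correspond to the test's cleanup: DisableTableProcedure and DeleteTableProcedure for testtb-testConsecutiveExports, followed by deletion of its snapshots. A hedged sketch of the equivalent client-side Admin calls follows; connection setup and error handling are simplified assumptions.

// Hedged sketch of the cleanup carried out by the procedures in this part of the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotCleanupExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);                               // DisableTableProcedure
      admin.deleteTable(table);                                // DeleteTableProcedure
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");  // SnapshotManager deletions
    }
  }
}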
2024-11-13T20:04:58,868 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528298867"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:58,868 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528298867"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:58,875 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:04:58,875 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 67f30c74b742d8a96fd8d8102693bd6e, NAME => 'testtb-testConsecutiveExports,,1731528258906.67f30c74b742d8a96fd8d8102693bd6e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 15be569b044ae47e8bf0566391821de3, NAME => 'testtb-testConsecutiveExports,1,1731528258906.15be569b044ae47e8bf0566391821de3.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:04:58,875 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-13T20:04:58,876 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528298875"}]},"ts":"9223372036854775807"} 2024-11-13T20:04:58,879 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-13T20:04:58,881 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T20:04:58,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 103 msec 2024-11-13T20:04:58,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-13T20:04:58,918 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-13T20:04:58,919 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T20:04:58,946 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-13T20:04:58,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-13T20:04:58,951 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-13T20:04:58,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-13T20:04:59,031 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=778 (was 
775) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3901 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) 
java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:46409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 4518) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:44386 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/54d0784d1943:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1653379013_1 at /127.0.0.1:37970 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:35788 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=766 (was 778), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=922 (was 951), ProcessCount=22 (was 22), AvailableMemoryMB=604 (was 426) - AvailableMemoryMB LEAK? - 2024-11-13T20:04:59,031 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-11-13T20:04:59,069 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=778, OpenFileDescriptor=766, MaxFileDescriptor=1048576, SystemLoadAverage=922, ProcessCount=22, AvailableMemoryMB=601 2024-11-13T20:04:59,069 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-11-13T20:04:59,071 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:04:59,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:04:59,079 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:04:59,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-13T20:04:59,081 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:04:59,082 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T20:04:59,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742107_1283 (size=458) 2024-11-13T20:04:59,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742107_1283 (size=458) 2024-11-13T20:04:59,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742107_1283 (size=458) 2024-11-13T20:04:59,108 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5c9dd49fa8390391770040ff3fba2988, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:59,111 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 49eb7d28c7639689fb92f92d4c873757, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:04:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T20:04:59,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742108_1284 (size=83) 2024-11-13T20:04:59,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742108_1284 (size=83) 2024-11-13T20:04:59,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742108_1284 (size=83) 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 5c9dd49fa8390391770040ff3fba2988, disabling compactions & flushes 2024-11-13T20:04:59,212 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. after waiting 0 ms 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,212 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,212 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5c9dd49fa8390391770040ff3fba2988: Waiting for close lock at 1731528299212Disabling compacts and flushes for region at 1731528299212Disabling writes for close at 1731528299212Writing region close event to WAL at 1731528299212Closed at 1731528299212 2024-11-13T20:04:59,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742109_1285 (size=83) 2024-11-13T20:04:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742109_1285 (size=83) 2024-11-13T20:04:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742109_1285 (size=83) 2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 49eb7d28c7639689fb92f92d4c873757, disabling compactions & flushes 2024-11-13T20:04:59,238 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
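
The "Potentially hanging thread" stack dumps and the "- Thread LEAK? -" summary earlier in this section come from HBase's ResourceChecker, which warns when the per-test thread count (Thread=778 here) stays above its limit of 500. For local debugging, a comparable dump can be produced with plain JVM APIs; this is a generic sketch, not the ResourceChecker implementation itself, and the output format only loosely mirrors the log above.

    import java.util.Map;

    public class ThreadDumpSketch {
      // Print every live thread with its state and current stack, similar in spirit
      // to the ResourceChecker report above (names and states differ per run).
      public static void main(String[] args) {
        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
          Thread t = e.getKey();
          System.out.println("Potentially hanging thread: " + t.getName() + " [" + t.getState() + "]");
          for (StackTraceElement frame : e.getValue()) {
            System.out.println("    " + frame);
          }
        }
      }
    }
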
2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. after waiting 0 ms 2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:04:59,238 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:04:59,238 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 49eb7d28c7639689fb92f92d4c873757: Waiting for close lock at 1731528299238Disabling compacts and flushes for region at 1731528299238Disabling writes for close at 1731528299238Writing region close event to WAL at 1731528299238Closed at 1731528299238 2024-11-13T20:04:59,251 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:04:59,252 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731528299251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528299251"}]},"ts":"1731528299251"} 2024-11-13T20:04:59,252 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731528299251"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528299251"}]},"ts":"1731528299251"} 2024-11-13T20:04:59,263 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
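
The create request logged above carries the full table descriptor: a single family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0' and VERSIONS => '1', and a split at '1' that yields the two regions initialized by the RegionOpenAndInit pool. A minimal client-side sketch that would create a similar table through the public Admin API follows; the Configuration setup, class name and error handling are illustrative and not taken from the test code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          // Family 'cf' with MOB enabled, MOB_THRESHOLD=0 and a single version,
          // matching the descriptor printed in the create request above.
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1);
          // Split key '1' produces the two regions seen in the log: ('', '1') and ('1', '').
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf.build()).build(),
              splits);
        }
      }
    }
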
2024-11-13T20:04:59,267 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:04:59,267 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528299267"}]},"ts":"1731528299267"} 2024-11-13T20:04:59,269 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-13T20:04:59,270 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:04:59,271 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:04:59,271 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:04:59,271 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:04:59,271 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:04:59,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, ASSIGN}] 2024-11-13T20:04:59,273 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, ASSIGN 2024-11-13T20:04:59,273 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, ASSIGN 2024-11-13T20:04:59,273 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 
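
Above, the balancer sees a single host and rack and the two ASSIGN subprocedures (pid=95 and pid=96) are initialized under the create-table procedure. Once assignment finishes, the resulting placement can be read back from a client through a RegionLocator; a small sketch, assuming an open Connection like the one in the earlier sketch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrintAssignmentsSketch {
      // Print start key -> server for each region of the table, which should match
      // the OPEN locations the procedures above record in hbase:meta.
      static void printAssignments(Connection conn) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey())
                + " -> " + loc.getServerName());
          }
        }
      }
    }
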
2024-11-13T20:04:59,273 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:04:59,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T20:04:59,425 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T20:04:59,425 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=5c9dd49fa8390391770040ff3fba2988, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:04:59,425 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=49eb7d28c7639689fb92f92d4c873757, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:04:59,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, ASSIGN because future has completed 2024-11-13T20:04:59,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:04:59,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, ASSIGN because future has completed 2024-11-13T20:04:59,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:04:59,603 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,604 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c9dd49fa8390391770040ff3fba2988, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:04:59,604 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 
service=AccessControlService 2024-11-13T20:04:59,605 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:04:59,605 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,605 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:59,605 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,605 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,614 INFO [StoreOpener-5c9dd49fa8390391770040ff3fba2988-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,619 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:04:59,619 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 49eb7d28c7639689fb92f92d4c873757, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:04:59,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. service=AccessControlService 2024-11-13T20:04:59,620 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
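
The AccessController coprocessor reported as loaded above is what backs the AccessControlService registered on each region in this secure-export test. It is normally wired in through cluster configuration rather than per table; the property names below are the standard HBase keys, but the exact values this mini cluster uses are not visible in the log, so treat this purely as a configuration sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Load AccessController on regions, the master and the region servers so ACL
        // checks (like the 'jenkins: RWXCA' grant written later in this log) are enforced.
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.setBoolean("hbase.security.authorization", true);
      }
    }
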
2024-11-13T20:04:59,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:04:59,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,620 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,623 INFO [StoreOpener-5c9dd49fa8390391770040ff3fba2988-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c9dd49fa8390391770040ff3fba2988 columnFamilyName cf 2024-11-13T20:04:59,626 DEBUG [StoreOpener-5c9dd49fa8390391770040ff3fba2988-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:59,627 INFO [StoreOpener-5c9dd49fa8390391770040ff3fba2988-1 {}] regionserver.HStore(327): Store=5c9dd49fa8390391770040ff3fba2988/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:04:59,627 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,628 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,628 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,629 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,629 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,631 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,642 INFO [StoreOpener-49eb7d28c7639689fb92f92d4c873757-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,644 INFO [StoreOpener-49eb7d28c7639689fb92f92d4c873757-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49eb7d28c7639689fb92f92d4c873757 columnFamilyName cf 2024-11-13T20:04:59,646 DEBUG [StoreOpener-49eb7d28c7639689fb92f92d4c873757-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:04:59,647 INFO [StoreOpener-49eb7d28c7639689fb92f92d4c873757-1 {}] regionserver.HStore(327): Store=49eb7d28c7639689fb92f92d4c873757/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:04:59,647 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,648 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,649 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,649 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,649 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,651 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:04:59,652 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened 5c9dd49fa8390391770040ff3fba2988; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64668395, jitterRate=-0.036365821957588196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:04:59,652 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:04:59,653 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for 5c9dd49fa8390391770040ff3fba2988: Running coprocessor pre-open hook at 1731528299605Writing region info on filesystem at 1731528299605Initializing all the Stores at 1731528299607 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528299607Cleaning up temporary data from old regions at 1731528299629 (+22 ms)Running coprocessor post-open hooks at 1731528299652 (+23 ms)Region opened successfully at 1731528299653 (+1 ms) 2024-11-13T20:04:59,655 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,656 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988., pid=97, masterSystemTime=1731528299581 2024-11-13T20:04:59,662 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=5c9dd49fa8390391770040ff3fba2988, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:04:59,664 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:04:59,664 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 
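
The open message above prints the effective split policy for the new region: SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy with desiredMaxFileSize=64668395 at jitterRate=-0.036..., i.e. a base of roughly 64 MiB before jitter. Whether this test pins that base on the cluster Configuration or on the table descriptor is not visible in the log; the descriptor route below is shown only as one way to set it, and the 64 MiB figure is the approximate pre-jitter value implied by the log, not a value taken from the test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        // Illustrative only: pin the split policy and the base max file size on a descriptor.
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
            .setMaxFileSize(64L * 1024 * 1024); // ~64 MiB; the logged value includes jitter
        System.out.println(builder.build().getMaxFileSize());
      }
    }
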
2024-11-13T20:04:59,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:04:59,670 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:04:59,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=95 2024-11-13T20:04:59,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987 in 240 msec 2024-11-13T20:04:59,671 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 49eb7d28c7639689fb92f92d4c873757; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73433273, jitterRate=0.09424103796482086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:04:59,671 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:04:59,672 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 49eb7d28c7639689fb92f92d4c873757: Running coprocessor pre-open hook at 1731528299620Writing region info on filesystem at 1731528299621 (+1 ms)Initializing all the Stores at 1731528299621Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528299622 (+1 ms)Cleaning up temporary data from old regions at 1731528299649 (+27 ms)Running coprocessor post-open hooks at 1731528299671 (+22 ms)Region opened successfully at 1731528299671 2024-11-13T20:04:59,673 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757., pid=98, masterSystemTime=1731528299581 2024-11-13T20:04:59,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, ASSIGN in 400 msec 2024-11-13T20:04:59,676 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
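
At this point both regions have been opened on their region servers and the parent ASSIGN procedures are completing. A caller that issues the create asynchronously can block until this happens with the public Admin API; a minimal sketch, assuming the Connection and descriptor from the earlier sketches. The wait on the returned Future plays the role of the repeated "Checking to see if procedure is done pid=94" polling visible in this log.

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class WaitForTableSketch {
      // Issue the create asynchronously, wait for the CreateTableProcedure to finish,
      // then confirm the table is available (all regions assigned and open).
      static void createAndWait(Connection conn, TableDescriptor td, byte[][] splits) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          Future<Void> f = admin.createTableAsync(td, splits); // analogous to pid=94 above
          f.get();                                             // returns once the procedure completes
          System.out.println("available=" + admin.isTableAvailable(td.getTableName()));
        }
      }
    }
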
2024-11-13T20:04:59,676 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:04:59,676 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=49eb7d28c7639689fb92f92d4c873757, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:04:59,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:04:59,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-13T20:04:59,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197 in 251 msec 2024-11-13T20:04:59,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-13T20:04:59,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, ASSIGN in 417 msec 2024-11-13T20:04:59,693 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:04:59,693 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528299693"}]},"ts":"1731528299693"} 2024-11-13T20:04:59,702 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-13T20:04:59,703 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:04:59,704 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-13T20:04:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T20:04:59,711 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T20:04:59,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:59,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:59,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:59,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:04:59,717 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:59,719 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:59,719 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:59,722 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:04:59,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 648 msec 2024-11-13T20:05:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T20:05:00,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T20:05:00,216 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,222 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,222 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 
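
Above, PermissionStorage writes the owner grant "jenkins: RWXCA" for the new table and each server's ZKPermissionWatcher refreshes its cache from the /hbase/acl znode. The effective ACL can be read back from a client with AccessControlClient; a sketch, assuming authorization is enabled on the cluster as in the earlier configuration sketch.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ReadAclSketch {
      // List the permissions stored for the table; the output should include the
      // owner entry written above (jenkins: RWXCA).
      static void printAcl(Connection conn) throws Throwable {
        List<UserPermission> perms = AccessControlClient.getUserPermissions(
            conn, "testtb-testExportFileSystemStateWithMergeRegion");
        for (UserPermission p : perms) {
          System.out.println(p);
        }
      }
    }
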
2024-11-13T20:05:00,222 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:00,226 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,235 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,243 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,247 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T20:05:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528300247 (current time:1731528300247). 2024-11-13T20:05:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-13T20:05:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@568f16ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:00,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:00,249 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2800defc, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:00,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,251 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39846, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:00,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a668aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:00,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:00,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,257 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
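The master handler above is servicing a snapshot request for emptySnaptb0-testExportFileSystemStateWithMergeRegion and has filled in the defaults (creation time, TTL, snapshot version 2, owner jenkins). On the client side, a FLUSH-type snapshot request of this kind can be issued roughly as follows; this is a hedged sketch against the public Admin API, not the test's own code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Issues the same kind of request MasterRpcServices.snapshot() handles above:
          // a FLUSH-type snapshot; TTL, version and owner are defaulted by the master.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }
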
2024-11-13T20:05:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,257 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:00,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@508b559f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:00,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:00,262 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:00,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:00,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:00,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7faae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:00,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:00,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,264 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:00,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44dbf88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:00,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:00,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,268 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60478, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:00,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,273 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
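Before the ACL check can run, the short-lived connection above locates the hbase:acl region holding the permission row for the table; the row key in hbase:acl is simply the table name, as the locator log shows. A hedged sketch of the equivalent explicit lookup through RegionLocator (connection setup assumed, as before):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateAclRow {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          // The ACL row key for a table is the table name, matching the locator log above.
          HRegionLocation loc = locator.getRegionLocation(
              Bytes.toBytes("testtb-testExportFileSystemStateWithMergeRegion"));
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }
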
2024-11-13T20:05:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T20:05:00,274 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:00,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
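The handler has now read the ACL entry [jenkins: RWXCA] from hbase:acl and moves on to the snapshot itself. Client code can read the same table permissions through AccessControlClient; a small sketch, assuming the AccessController coprocessor is enabled as it is in this test setup:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTablePermissions {
      public static void main(String[] args) throws Throwable { // AccessControlClient methods declare Throwable
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Should print an entry equivalent to the "jenkins: RWXCA" read in the log above.
          for (UserPermission perm : AccessControlClient.getUserPermissions(
              conn, "testtb-testExportFileSystemStateWithMergeRegion")) {
            System.out.println(perm);
          }
        }
      }
    }
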
2024-11-13T20:05:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T20:05:00,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-13T20:05:00,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T20:05:00,277 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:00,279 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:00,281 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:00,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742110_1286 (size=215) 2024-11-13T20:05:00,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742110_1286 (size=215) 2024-11-13T20:05:00,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742110_1286 (size=215) 2024-11-13T20:05:00,303 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:00,304 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757}] 2024-11-13T20:05:00,305 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:00,306 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:00,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T20:05:00,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:00,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 5c9dd49fa8390391770040ff3fba2988: 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 49eb7d28c7639689fb92f92d4c873757: 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T20:05:00,458 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:00,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:00,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742112_1288 (size=86) 2024-11-13T20:05:00,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742112_1288 (size=86) 2024-11-13T20:05:00,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742112_1288 (size=86) 2024-11-13T20:05:00,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:00,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-13T20:05:00,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-13T20:05:00,482 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:00,482 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:00,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742111_1287 (size=86) 2024-11-13T20:05:00,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742111_1287 (size=86) 2024-11-13T20:05:00,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 in 179 msec 2024-11-13T20:05:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742111_1287 (size=86) 2024-11-13T20:05:00,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
2024-11-13T20:05:00,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-13T20:05:00,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-13T20:05:00,486 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:00,487 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:00,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=101, resume processing ppid=99 2024-11-13T20:05:00,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 in 185 msec 2024-11-13T20:05:00,491 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:00,493 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:00,494 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
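The procedure is now in SNAPSHOT_SNAPSHOT_MOB_REGION because the table's cf family is MOB-enabled, so the snapshot also records a synthetic mob region (and the later flushes write mob files under /mobdir). A sketch of how such a table could be created, using the split point "1" implied by the two regions in this log; the mob threshold value here is an assumption, not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMobEnabled(true)   // gives the snapshot a mob "region" to record
                      .setMobThreshold(0)    // assumed threshold; forces values into MOB files
                      .build())
                  .build(),
              new byte[][] { Bytes.toBytes("1") }); // pre-split matching regions ",," and ",1,"
        }
      }
    }
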
2024-11-13T20:05:00,494 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:00,494 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:00,495 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:05:00,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742113_1289 (size=78) 2024-11-13T20:05:00,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742113_1289 (size=78) 2024-11-13T20:05:00,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742113_1289 (size=78) 2024-11-13T20:05:00,518 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:00,518 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,519 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742114_1290 (size=713) 2024-11-13T20:05:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742114_1290 (size=713) 2024-11-13T20:05:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742114_1290 (size=713) 2024-11-13T20:05:00,564 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:00,575 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:00,577 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,579 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:00,579 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-13T20:05:00,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 305 msec 2024-11-13T20:05:00,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T20:05:00,595 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T20:05:00,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-13T20:05:00,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-13T20:05:00,614 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:00,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 
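The two RegionServer lines above show the test loading rows with the WAL skipped, hence the "Data may be lost in the event of a crash" warning. A minimal sketch of a client write that triggers that code path; the row key and value are hypothetical, only the table, family and qualifier names come from this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                              // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                                 // produces the warning logged above
          table.put(put);
        }
      }
    }
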
2024-11-13T20:05:00,620 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:00,623 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,630 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,639 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T20:05:00,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T20:05:00,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528300643 (current time:1731528300643). 2024-11-13T20:05:00,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:00,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-13T20:05:00,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:00,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@302328d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:00,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:00,647 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:00,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:00,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:00,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@532bdef0, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:00,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:00,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,649 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:00,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ccf10e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:00,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:00,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,654 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,655 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:00,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,656 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@383391fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:00,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:00,658 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69c7be1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:00,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,660 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:00,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@235df2c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:00,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:00,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:00,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,664 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:00,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:00,668 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40544, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:00,669 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:00,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:00,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:00,669 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:00,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T20:05:00,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
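As with the first snapshot, the ACL entry is re-read and SnapshotManager confirms that no snapshot named snaptb0-testExportFileSystemStateWithMergeRegion exists yet. From a client, the completed snapshots can be listed to make the same check; a brief sketch (again illustrative, not from the test):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // At this point "emptySnaptb0-..." should already be listed,
          // while "snaptb0-..." should not exist yet, which is what SnapshotManager verifies above.
          for (SnapshotDescription snap : admin.listSnapshots()) {
            System.out.println(snap.getName());
          }
        }
      }
    }
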
2024-11-13T20:05:00,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T20:05:00,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-13T20:05:00,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T20:05:00,674 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:00,675 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:00,678 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:00,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742115_1291 (size=210) 2024-11-13T20:05:00,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742115_1291 (size=210) 2024-11-13T20:05:00,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742115_1291 (size=210) 2024-11-13T20:05:00,704 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:00,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757}] 2024-11-13T20:05:00,705 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:00,706 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:00,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T20:05:00,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-13T20:05:00,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:00,858 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 5c9dd49fa8390391770040ff3fba2988 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-13T20:05:00,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-13T20:05:00,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:05:00,859 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 49eb7d28c7639689fb92f92d4c873757 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-13T20:05:00,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 is 71, key is 13a6a722f0ddba4d67a928a5d8faa35e/cf:q/1731528300608/Put/seqid=0 2024-11-13T20:05:00,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 is 69, key is 05144ccc8fff736b2789614171e518b73/cf:q/1731528300604/Put/seqid=0 2024-11-13T20:05:00,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742116_1292 (size=8312) 2024-11-13T20:05:00,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742116_1292 (size=8312) 2024-11-13T20:05:00,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742116_1292 (size=8312) 2024-11-13T20:05:00,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:00,944 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:00,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/.tmp/cf/df8b23d5148e45cf8f1366bf4570b8c2, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=49eb7d28c7639689fb92f92d4c873757] 2024-11-13T20:05:00,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/.tmp/cf/df8b23d5148e45cf8f1366bf4570b8c2 is 224, key is 12a90d0344feb92c3f3925c358f32be5f/cf:q/1731528300608/Put/seqid=0 2024-11-13T20:05:00,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742117_1293 (size=4964) 2024-11-13T20:05:00,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742117_1293 (size=4964) 2024-11-13T20:05:00,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742117_1293 (size=4964) 2024-11-13T20:05:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742118_1294 (size=16159) 2024-11-13T20:05:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742118_1294 (size=16159) 2024-11-13T20:05:00,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:00,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742118_1294 (size=16159) 2024-11-13T20:05:00,991 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/.tmp/cf/df8b23d5148e45cf8f1366bf4570b8c2 2024-11-13T20:05:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T20:05:01,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/.tmp/cf/df8b23d5148e45cf8f1366bf4570b8c2 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2 2024-11-13T20:05:01,007 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:01,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2, entries=49, sequenceid=6, filesize=15.8 K 2024-11-13T20:05:01,011 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 49eb7d28c7639689fb92f92d4c873757 in 152ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:01,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-13T20:05:01,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for 49eb7d28c7639689fb92f92d4c873757: 2024-11-13T20:05:01,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T20:05:01,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:01,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2] hfiles 2024-11-13T20:05:01,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/.tmp/cf/6bc8924bee1d42a6b6578583de44ef39, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=5c9dd49fa8390391770040ff3fba2988] 2024-11-13T20:05:01,015 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/.tmp/cf/6bc8924bee1d42a6b6578583de44ef39 is 224, key is 05144ccc8fff736b2789614171e518b73/cf:q/1731528300604/Put/seqid=0 2024-11-13T20:05:01,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742120_1296 (size=125) 2024-11-13T20:05:01,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742120_1296 (size=125) 2024-11-13T20:05:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742120_1296 (size=125) 2024-11-13T20:05:01,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
2024-11-13T20:05:01,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-13T20:05:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-13T20:05:01,061 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:01,062 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:01,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 49eb7d28c7639689fb92f92d4c873757 in 359 msec 2024-11-13T20:05:01,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742119_1295 (size=5534) 2024-11-13T20:05:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742119_1295 (size=5534) 2024-11-13T20:05:01,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742119_1295 (size=5534) 2024-11-13T20:05:01,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/.tmp/cf/6bc8924bee1d42a6b6578583de44ef39 2024-11-13T20:05:01,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/.tmp/cf/6bc8924bee1d42a6b6578583de44ef39 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39 2024-11-13T20:05:01,085 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39, entries=1, sequenceid=6, filesize=5.4 K 2024-11-13T20:05:01,086 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 5c9dd49fa8390391770040ff3fba2988 in 228ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 
5c9dd49fa8390391770040ff3fba2988: 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39] hfiles 2024-11-13T20:05:01,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742121_1297 (size=125) 2024-11-13T20:05:01,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742121_1297 (size=125) 2024-11-13T20:05:01,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742121_1297 (size=125) 2024-11-13T20:05:01,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T20:05:01,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 
2024-11-13T20:05:01,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-13T20:05:01,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-13T20:05:01,511 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:01,511 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:01,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-13T20:05:01,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c9dd49fa8390391770040ff3fba2988 in 808 msec 2024-11-13T20:05:01,514 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:01,515 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:01,516 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
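The entries above trace the master-side SnapshotProcedure (pid=102) as it flushes and snapshots each region of testtb-testExportFileSystemStateWithMergeRegion and then moves on to the MOB region. For orientation only: a snapshot like snaptb0-testExportFileSystemStateWithMergeRegion is normally requested through the public HBase client API. The sketch below is illustrative and is not the test's own code; the configuration source and class name are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Hypothetical client-side sketch; not part of the logged test run.
    public class SnapshotSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
                // FLUSH-type snapshot: each region flushes its memstore before its store
                // files are referenced in the snapshot manifest, which matches the
                // "Flushing ... column families" and SnapshotManifest entries above.
                admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion", table, SnapshotType.FLUSH);
            }
        }
    }

The snapshot descriptor printed in the log ({ ss=... type=FLUSH ttl=0 }) shows the FLUSH type, which is why each SnapshotRegionProcedure above is preceded by a region flush.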
2024-11-13T20:05:01,516 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:01,516 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:01,517 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988] hfiles 2024-11-13T20:05:01,517 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:01,517 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742122_1298 (size=309) 2024-11-13T20:05:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742122_1298 (size=309) 2024-11-13T20:05:01,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742122_1298 (size=309) 2024-11-13T20:05:01,534 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:01,534 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,535 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742123_1299 (size=1023) 2024-11-13T20:05:01,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42549 is added to blk_1073742123_1299 (size=1023) 2024-11-13T20:05:01,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742123_1299 (size=1023) 2024-11-13T20:05:01,546 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:01,552 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:01,552 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:01,553 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:01,553 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-13T20:05:01,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 883 msec 2024-11-13T20:05:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T20:05:01,816 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T20:05:01,841 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:05:01,842 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:05:01,842 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T20:05:01,843 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:05:01,843 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:05:01,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T20:05:01,844 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T20:05:01,844 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36439 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T20:05:01,844 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T20:05:01,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:05:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:01,849 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:05:01,849 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:01,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-13T20:05:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T20:05:01,850 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:05:01,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742124_1300 (size=399) 2024-11-13T20:05:01,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742124_1300 (size=399) 2024-11-13T20:05:01,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742124_1300 
(size=399) 2024-11-13T20:05:01,860 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3b2439aa92e0a303b80aec69acf9b1b7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:01,864 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d0ad0ccf962a0de7cfa75e4891744033, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742125_1301 (size=85) 2024-11-13T20:05:01,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742125_1301 (size=85) 2024-11-13T20:05:01,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742125_1301 (size=85) 2024-11-13T20:05:01,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:01,872 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 3b2439aa92e0a303b80aec69acf9b1b7, disabling compactions & flushes 2024-11-13T20:05:01,872 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:01,872 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 
2024-11-13T20:05:01,872 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. after waiting 0 ms 2024-11-13T20:05:01,872 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:01,872 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:01,872 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3b2439aa92e0a303b80aec69acf9b1b7: Waiting for close lock at 1731528301872Disabling compacts and flushes for region at 1731528301872Disabling writes for close at 1731528301872Writing region close event to WAL at 1731528301872Closed at 1731528301872 2024-11-13T20:05:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742126_1302 (size=85) 2024-11-13T20:05:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742126_1302 (size=85) 2024-11-13T20:05:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742126_1302 (size=85) 2024-11-13T20:05:01,875 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:01,875 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing d0ad0ccf962a0de7cfa75e4891744033, disabling compactions & flushes 2024-11-13T20:05:01,875 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:01,875 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000002/launch_container.sh] 2024-11-13T20:05:01,875 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:01,875 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 
after waiting 0 ms 2024-11-13T20:05:01,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:01,876 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000002/container_tokens] 2024-11-13T20:05:01,876 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:01,876 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000002/sysfs] 2024-11-13T20:05:01,876 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for d0ad0ccf962a0de7cfa75e4891744033: Waiting for close lock at 1731528301875Disabling compacts and flushes for region at 1731528301875Disabling writes for close at 1731528301875Writing region close event to WAL at 1731528301876 (+1 ms)Closed at 1731528301876 2024-11-13T20:05:01,877 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:05:01,877 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731528301877"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528301877"}]},"ts":"1731528301877"} 2024-11-13T20:05:01,877 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731528301877"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528301877"}]},"ts":"1731528301877"} 2024-11-13T20:05:01,880 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
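The CreateTableProcedure entries above (pid=105) write the filesystem layout for testtb-testExportFileSystemStateWithMergeRegion-1 and add two regions to hbase:meta, split at row key '2' (STARTKEY ''/ENDKEY '2' and STARTKEY '2'/ENDKEY ''). A pre-split table of this shape can be requested from a client roughly as follows; this is a hedged sketch that reuses an Admin obtained as in the earlier snapshot sketch, and the helper and class names are hypothetical. Only attributes visible in the logged descriptor (family 'cf', VERSIONS '1', BLOCKSIZE 65536) are set; everything else is left at its default.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper; not part of the logged test run.
    public final class CreateTableSketch {
        static void createPreSplitTable(Admin admin) throws IOException {
            TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
            ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)     // VERSIONS => '1' in the descriptor above
                    .setBlocksize(65536);  // BLOCKSIZE => '65536 B (64KB)'
            // One split key produces exactly two regions, ('', '2') and ('2', ''),
            // matching the RegionOpenAndInit entries above.
            byte[][] splitKeys = { Bytes.toBytes("2") };
            admin.createTable(TableDescriptorBuilder.newBuilder(table)
                    .setColumnFamily(cf.build()).build(), splitKeys);
        }
    }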
2024-11-13T20:05:01,881 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:05:01,881 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528301881"}]},"ts":"1731528301881"} 2024-11-13T20:05:01,883 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-13T20:05:01,884 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:05:01,885 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:05:01,885 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:05:01,885 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:05:01,885 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:05:01,886 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, ASSIGN}] 2024-11-13T20:05:01,887 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, ASSIGN 2024-11-13T20:05:01,887 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, ASSIGN 2024-11-13T20:05:01,888 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, 
retain=false 2024-11-13T20:05:01,888 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:05:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T20:05:02,038 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T20:05:02,039 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=d0ad0ccf962a0de7cfa75e4891744033, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:02,039 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=3b2439aa92e0a303b80aec69acf9b1b7, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:02,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, ASSIGN because future has completed 2024-11-13T20:05:02,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:02,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, ASSIGN because future has completed 2024-11-13T20:05:02,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T20:05:02,197 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:02,197 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 
2024-11-13T20:05:02,197 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 3b2439aa92e0a303b80aec69acf9b1b7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.', STARTKEY => '', ENDKEY => '2'} 2024-11-13T20:05:02,197 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => d0ad0ccf962a0de7cfa75e4891744033, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.', STARTKEY => '2', ENDKEY => ''} 2024-11-13T20:05:02,197 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. service=AccessControlService 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. service=AccessControlService 2024-11-13T20:05:02,198 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:02,198 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,200 INFO [StoreOpener-3b2439aa92e0a303b80aec69acf9b1b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,200 INFO [StoreOpener-d0ad0ccf962a0de7cfa75e4891744033-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,201 INFO [StoreOpener-d0ad0ccf962a0de7cfa75e4891744033-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0ad0ccf962a0de7cfa75e4891744033 columnFamilyName cf 2024-11-13T20:05:02,201 INFO [StoreOpener-3b2439aa92e0a303b80aec69acf9b1b7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3b2439aa92e0a303b80aec69acf9b1b7 columnFamilyName cf 2024-11-13T20:05:02,201 DEBUG [StoreOpener-d0ad0ccf962a0de7cfa75e4891744033-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:02,201 DEBUG [StoreOpener-3b2439aa92e0a303b80aec69acf9b1b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:02,202 INFO [StoreOpener-d0ad0ccf962a0de7cfa75e4891744033-1 {}] regionserver.HStore(327): Store=d0ad0ccf962a0de7cfa75e4891744033/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:02,202 INFO [StoreOpener-3b2439aa92e0a303b80aec69acf9b1b7-1 {}] regionserver.HStore(327): Store=3b2439aa92e0a303b80aec69acf9b1b7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:02,202 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,202 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,202 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 
{event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,203 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,205 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,205 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,207 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:02,207 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:02,208 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened d0ad0ccf962a0de7cfa75e4891744033; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70506416, jitterRate=0.05062747001647949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:02,208 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,208 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 3b2439aa92e0a303b80aec69acf9b1b7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63717649, jitterRate=-0.05053304135799408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:02,208 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,209 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for d0ad0ccf962a0de7cfa75e4891744033: Running coprocessor pre-open hook at 1731528302198Writing region info on filesystem at 1731528302198Initializing all the Stores at 1731528302199 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528302199Cleaning up temporary data from old regions at 1731528302203 (+4 ms)Running coprocessor post-open hooks at 1731528302208 (+5 ms)Region opened successfully at 1731528302209 (+1 ms) 2024-11-13T20:05:02,209 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 3b2439aa92e0a303b80aec69acf9b1b7: Running coprocessor pre-open hook at 1731528302198Writing region info on filesystem at 1731528302198Initializing all the Stores at 1731528302199 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528302199Cleaning up temporary data from old regions at 1731528302203 (+4 ms)Running coprocessor post-open hooks at 1731528302208 (+5 ms)Region opened successfully at 1731528302209 (+1 ms) 2024-11-13T20:05:02,210 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033., pid=108, masterSystemTime=1731528302194 2024-11-13T20:05:02,210 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7., pid=109, masterSystemTime=1731528302194 2024-11-13T20:05:02,211 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:02,211 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:02,212 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=d0ad0ccf962a0de7cfa75e4891744033, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:02,212 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 
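By this point both regions of testtb-testExportFileSystemStateWithMergeRegion-1 have been opened (pid=108 and pid=109) and their post open deploy tasks have reported back to the master. From a client, the resulting region-to-server assignment can be inspected through RegionLocator; a brief sketch under the same assumptions as the earlier examples, with a hypothetical method name:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Hypothetical helper; prints which server hosts each region of the table.
    public final class AssignmentSketch {
        static void printAssignments(Connection connection) throws IOException {
            TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
            try (RegionLocator locator = connection.getRegionLocator(table)) {
                List<HRegionLocation> locations = locator.getAllRegionLocations();
                for (HRegionLocation location : locations) {
                    // Pairs the region's encoded name (e.g. 3b2439aa92e0a303b80aec69acf9b1b7)
                    // with the hosting server (e.g. 54d0784d1943,36439,1731528160987).
                    System.out.println(location.getRegion().getEncodedName()
                            + " -> " + location.getServerName());
                }
            }
        }
    }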
2024-11-13T20:05:02,212 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:02,213 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=3b2439aa92e0a303b80aec69acf9b1b7, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:02,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:02,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:02,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-13T20:05:02,218 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197 in 174 msec 2024-11-13T20:05:02,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=106 2024-11-13T20:05:02,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987 in 174 msec 2024-11-13T20:05:02,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, ASSIGN in 333 msec 2024-11-13T20:05:02,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=105 2024-11-13T20:05:02,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, ASSIGN in 334 msec 2024-11-13T20:05:02,221 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:05:02,222 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528302222"}]},"ts":"1731528302222"} 2024-11-13T20:05:02,223 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-13T20:05:02,224 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:05:02,224 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-13T20:05:02,227 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-13T20:05:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:02,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:02,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 385 msec 2024-11-13T20:05:02,368 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-13T20:05:02,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T20:05:02,475 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T20:05:02,478 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:02,482 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033., hostname=54d0784d1943,33333,1731528161197, seqNum=2] 2024-11-13T20:05:02,484 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-13T20:05:02,500 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033] 2024-11-13T20:05:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033], force=true 2024-11-13T20:05:02,508 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033], force=true 2024-11-13T20:05:02,508 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
regions=[3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033], force=true 2024-11-13T20:05:02,508 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033], force=true 2024-11-13T20:05:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T20:05:02,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, UNASSIGN}] 2024-11-13T20:05:02,522 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, UNASSIGN 2024-11-13T20:05:02,522 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, UNASSIGN 2024-11-13T20:05:02,523 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=3b2439aa92e0a303b80aec69acf9b1b7, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:02,523 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=d0ad0ccf962a0de7cfa75e4891744033, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:02,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, UNASSIGN because future has completed 2024-11-13T20:05:02,525 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:02,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:02,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, UNASSIGN because future has completed 2024-11-13T20:05:02,528 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: 
false 2024-11-13T20:05:02,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T20:05:02,678 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,678 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T20:05:02,678 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing d0ad0ccf962a0de7cfa75e4891744033, disabling compactions & flushes 2024-11-13T20:05:02,678 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:02,678 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:02,679 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. after waiting 0 ms 2024-11-13T20:05:02,679 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 2024-11-13T20:05:02,679 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing d0ad0ccf962a0de7cfa75e4891744033 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-13T20:05:02,681 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,682 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T20:05:02,682 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing 3b2439aa92e0a303b80aec69acf9b1b7, disabling compactions & flushes 2024-11-13T20:05:02,682 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:02,682 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 
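Editor's note: the "merge regions [3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033]" RPC logged above is what spawns MergeTableRegionsProcedure pid=110 and the UNASSIGN/CloseRegionProcedure children being scheduled here. A hedged sketch of the equivalent client call follows; it assumes the Admin/TableName handles from the earlier sketch, and force=true mirrors the procedure's own parameters.

```java
// Sketch of the client call behind "merge regions [3b2439..., d0ad0ccf...]" (pid=110 above).
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeAllRegions {
  static void mergeBothRegions(Admin admin, TableName tn) throws Exception {
    List<RegionInfo> before = admin.getRegions(tn);   // the two pre-merge regions
    admin.mergeRegionsAsync(
        new byte[][] {
            before.get(0).getEncodedNameAsBytes(),    // e.g. 3b2439aa92e0a303b80aec69acf9b1b7
            before.get(1).getEncodedNameAsBytes()     // e.g. d0ad0ccf962a0de7cfa75e4891744033
        },
        true)                                         // forcible merge, as logged by the master
      .get();                                         // block until MergeTableRegionsProcedure completes
    // Afterwards the table is served by a single merged region (2bb4bf5a... later in this log).
    assert admin.getRegions(tn).size() == 1;
  }
}
```

The close-and-flush records that follow (pid=113/114) are the server side of this call: each parent region flushes its single cell to an HFile and is closed before the merged region is created.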
2024-11-13T20:05:02,682 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. after waiting 0 ms 2024-11-13T20:05:02,682 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:02,682 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing 3b2439aa92e0a303b80aec69acf9b1b7 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-13T20:05:02,697 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/.tmp/cf/ee87709a38c74570999ca6ca3e27c811 is 28, key is 2/cf:/1731528302483/Put/seqid=0 2024-11-13T20:05:02,705 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/.tmp/cf/c7616af49fb44819b5a485db43d23f46 is 28, key is 1/cf:/1731528302479/Put/seqid=0 2024-11-13T20:05:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742127_1303 (size=4945) 2024-11-13T20:05:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742127_1303 (size=4945) 2024-11-13T20:05:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742127_1303 (size=4945) 2024-11-13T20:05:02,716 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/.tmp/cf/ee87709a38c74570999ca6ca3e27c811 2024-11-13T20:05:02,724 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/.tmp/cf/ee87709a38c74570999ca6ca3e27c811 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811 2024-11-13T20:05:02,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742128_1304 (size=4945) 2024-11-13T20:05:02,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42549 is added to blk_1073742128_1304 (size=4945) 2024-11-13T20:05:02,731 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811, entries=1, sequenceid=5, filesize=4.8 K 2024-11-13T20:05:02,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742128_1304 (size=4945) 2024-11-13T20:05:02,732 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d0ad0ccf962a0de7cfa75e4891744033 in 53ms, sequenceid=5, compaction requested=false 2024-11-13T20:05:02,733 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/.tmp/cf/c7616af49fb44819b5a485db43d23f46 2024-11-13T20:05:02,740 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/.tmp/cf/c7616af49fb44819b5a485db43d23f46 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46 2024-11-13T20:05:02,742 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:05:02,743 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:02,743 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. 
2024-11-13T20:05:02,743 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for d0ad0ccf962a0de7cfa75e4891744033: Waiting for close lock at 1731528302678Running coprocessor pre-close hooks at 1731528302678Disabling compacts and flushes for region at 1731528302678Disabling writes for close at 1731528302679 (+1 ms)Obtaining lock to block concurrent updates at 1731528302679Preparing flush snapshotting stores in d0ad0ccf962a0de7cfa75e4891744033 at 1731528302679Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731528302679Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033. at 1731528302680 (+1 ms)Flushing d0ad0ccf962a0de7cfa75e4891744033/cf: creating writer at 1731528302680Flushing d0ad0ccf962a0de7cfa75e4891744033/cf: appending metadata at 1731528302696 (+16 ms)Flushing d0ad0ccf962a0de7cfa75e4891744033/cf: closing flushed file at 1731528302696Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8f2a07f: reopening flushed file at 1731528302723 (+27 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d0ad0ccf962a0de7cfa75e4891744033 in 53ms, sequenceid=5, compaction requested=false at 1731528302732 (+9 ms)Writing region close event to WAL at 1731528302733 (+1 ms)Running coprocessor post-close hooks at 1731528302742 (+9 ms)Closed at 1731528302743 (+1 ms) 2024-11-13T20:05:02,747 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:02,747 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=d0ad0ccf962a0de7cfa75e4891744033, regionState=CLOSED 2024-11-13T20:05:02,747 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46, entries=1, sequenceid=5, filesize=4.8 K 2024-11-13T20:05:02,749 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3b2439aa92e0a303b80aec69acf9b1b7 in 66ms, sequenceid=5, compaction requested=false 2024-11-13T20:05:02,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:02,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=112 2024-11-13T20:05:02,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure d0ad0ccf962a0de7cfa75e4891744033, server=54d0784d1943,33333,1731528161197 in 225 msec 2024-11-13T20:05:02,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d0ad0ccf962a0de7cfa75e4891744033, UNASSIGN in 233 msec 2024-11-13T20:05:02,777 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:05:02,778 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:02,778 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 2024-11-13T20:05:02,779 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for 3b2439aa92e0a303b80aec69acf9b1b7: Waiting for close lock at 1731528302682Running coprocessor pre-close hooks at 1731528302682Disabling compacts and flushes for region at 1731528302682Disabling writes for close at 1731528302682Obtaining lock to block concurrent updates at 1731528302682Preparing flush snapshotting stores in 3b2439aa92e0a303b80aec69acf9b1b7 at 1731528302682Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731528302682Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7. 
at 1731528302683 (+1 ms)Flushing 3b2439aa92e0a303b80aec69acf9b1b7/cf: creating writer at 1731528302683Flushing 3b2439aa92e0a303b80aec69acf9b1b7/cf: appending metadata at 1731528302704 (+21 ms)Flushing 3b2439aa92e0a303b80aec69acf9b1b7/cf: closing flushed file at 1731528302704Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@238a294c: reopening flushed file at 1731528302739 (+35 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3b2439aa92e0a303b80aec69acf9b1b7 in 66ms, sequenceid=5, compaction requested=false at 1731528302749 (+10 ms)Writing region close event to WAL at 1731528302753 (+4 ms)Running coprocessor post-close hooks at 1731528302778 (+25 ms)Closed at 1731528302778 2024-11-13T20:05:02,781 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed 3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:02,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=3b2439aa92e0a303b80aec69acf9b1b7, regionState=CLOSED 2024-11-13T20:05:02,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:02,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=111 2024-11-13T20:05:02,789 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure 3b2439aa92e0a303b80aec69acf9b1b7, server=54d0784d1943,36439,1731528160987 in 258 msec 2024-11-13T20:05:02,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-13T20:05:02,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3b2439aa92e0a303b80aec69acf9b1b7, UNASSIGN in 268 msec 2024-11-13T20:05:02,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742129_1305 (size=84) 2024-11-13T20:05:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742129_1305 (size=84) 2024-11-13T20:05:02,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742129_1305 (size=84) 2024-11-13T20:05:02,821 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:02,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T20:05:02,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742130_1306 (size=20) 2024-11-13T20:05:02,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742130_1306 (size=20) 2024-11-13T20:05:02,835 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742130_1306 (size=20) 2024-11-13T20:05:02,837 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742131_1307 (size=21) 2024-11-13T20:05:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742131_1307 (size=21) 2024-11-13T20:05:02,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742131_1307 (size=21) 2024-11-13T20:05:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742132_1308 (size=84) 2024-11-13T20:05:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742132_1308 (size=84) 2024-11-13T20:05:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742132_1308 (size=84) 2024-11-13T20:05:02,852 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:02,862 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-13T20:05:02,865 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301847.3b2439aa92e0a303b80aec69acf9b1b7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:02,865 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731528301847.d0ad0ccf962a0de7cfa75e4891744033.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:02,865 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:02,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, ASSIGN}] 2024-11-13T20:05:02,887 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, ASSIGN 2024-11-13T20:05:02,888 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, ASSIGN; state=MERGED, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:05:03,018 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0005_000001 (auth:SIMPLE) from 127.0.0.1:55244 2024-11-13T20:05:03,025 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000001/launch_container.sh] 2024-11-13T20:05:03,025 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000001/container_tokens] 2024-11-13T20:05:03,025 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_2/usercache/jenkins/appcache/application_1731528172678_0005/container_1731528172678_0005_01_000001/sysfs] 2024-11-13T20:05:03,039 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T20:05:03,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=2bb4bf5ae8a4b752b723f73eda35b84f, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:03,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, ASSIGN because future has completed 2024-11-13T20:05:03,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T20:05:03,198 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 
2024-11-13T20:05:03,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 2bb4bf5ae8a4b752b723f73eda35b84f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.', STARTKEY => '', ENDKEY => ''} 2024-11-13T20:05:03,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. service=AccessControlService 2024-11-13T20:05:03,198 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:03,198 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,199 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:03,199 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,199 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,200 INFO [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,201 INFO [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bb4bf5ae8a4b752b723f73eda35b84f columnFamilyName cf 2024-11-13T20:05:03,202 DEBUG [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:03,226 DEBUG [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/c7616af49fb44819b5a485db43d23f46.3b2439aa92e0a303b80aec69acf9b1b7->hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46-top 2024-11-13T20:05:03,233 DEBUG [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/ee87709a38c74570999ca6ca3e27c811.d0ad0ccf962a0de7cfa75e4891744033->hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811-top 2024-11-13T20:05:03,234 INFO [StoreOpener-2bb4bf5ae8a4b752b723f73eda35b84f-1 {}] regionserver.HStore(327): Store=2bb4bf5ae8a4b752b723f73eda35b84f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:03,234 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,235 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,236 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,237 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,237 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,239 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,240 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 2bb4bf5ae8a4b752b723f73eda35b84f; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58744437, jitterRate=-0.12463967502117157}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:03,240 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,241 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 2bb4bf5ae8a4b752b723f73eda35b84f: Running coprocessor pre-open hook at 1731528303199Writing region info on filesystem at 1731528303199Initializing all the Stores at 1731528303200 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528303200Cleaning up temporary data from old regions at 1731528303237 (+37 ms)Running coprocessor post-open hooks at 1731528303240 (+3 ms)Region opened successfully at 1731528303241 (+1 ms) 2024-11-13T20:05:03,242 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f., pid=116, masterSystemTime=1731528303194 2024-11-13T20:05:03,243 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.,because compaction is disabled. 2024-11-13T20:05:03,246 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 2024-11-13T20:05:03,246 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 
2024-11-13T20:05:03,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=2bb4bf5ae8a4b752b723f73eda35b84f, regionState=OPEN, openSeqNum=9, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:03,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:03,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-13T20:05:03,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987 in 208 msec 2024-11-13T20:05:03,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-13T20:05:03,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, ASSIGN in 366 msec 2024-11-13T20:05:03,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3b2439aa92e0a303b80aec69acf9b1b7, d0ad0ccf962a0de7cfa75e4891744033], force=true in 752 msec 2024-11-13T20:05:03,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T20:05:03,646 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T20:05:03,646 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-13T20:05:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528303646 (current time:1731528303646). 
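Editor's note: with the merge finished (pid=110, 752 msec), the client immediately requests a FLUSH snapshot of the merged table. A hedged sketch of the equivalent Admin call; the snapshot name, table name, and type=FLUSH are taken from the request logged above, the setup is assumed to reuse the earlier Admin handle.

```java
// Sketch of the client call behind the snapshot request logged above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshot {
  static void snapshotMergedTable(Admin admin) throws Exception {
    admin.snapshot(
        "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
        SnapshotType.FLUSH);  // FLUSH snapshot: memstores are flushed before the manifest is written
  }
}
```

The records that follow (ClusterIdFetcher, PermissionStorage reads, and the writeAclToSnapshotDescription frames in the call stacks) are the master validating the request and attaching the table's ACLs to the snapshot description before the snapshot procedure itself starts.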
2024-11-13T20:05:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-13T20:05:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13fa0b10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:03,649 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:03,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:03,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:03,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46988540, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:03,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:03,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,650 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:03,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23b620de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:03,652 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:03,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:03,653 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33306, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:03,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,654 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:05:03,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47c3a0fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:03,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:03,656 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a152b4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:03,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,657 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:03,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51fff99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:03,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:03,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:03,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:03,660 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33322, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
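Editor's note: the entries above show the master validating the incoming snapshot description for snaptb0-testExportFileSystemStateWithMergeRegion-1 (defaulting the TTL, setting VERSION to 2, recording jenkins as owner) on the RPC handler that services MasterRpcServices.snapshot. Below is a minimal client-side sketch of the call that drives this path, assuming a reachable cluster configuration on the classpath; the snapshot and table names are taken from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Cluster settings come from hbase-site.xml on the classpath; the test in
    // this log runs against an in-process mini cluster instead.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Sends the snapshot request handled by MasterRpcServices.snapshot(...),
      // which validates the SnapshotDescription (TTL, version, owner, ACLs)
      // and then runs the SnapshotProcedure seen as pid=117 below.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}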
2024-11-13T20:05:03,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:03,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:03,663 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44884, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:03,664 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:03,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:03,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:03,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-13T20:05:03,665 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:03,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:05:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-13T20:05:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-13T20:05:03,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T20:05:03,667 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:03,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:03,671 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:03,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742133_1309 (size=216) 2024-11-13T20:05:03,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742133_1309 (size=216) 2024-11-13T20:05:03,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742133_1309 (size=216) 2024-11-13T20:05:03,684 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:03,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f}] 2024-11-13T20:05:03,685 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T20:05:03,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-13T20:05:03,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 2024-11-13T20:05:03,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 2bb4bf5ae8a4b752b723f73eda35b84f: 2024-11-13T20:05:03,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-13T20:05:03,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:03,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/c7616af49fb44819b5a485db43d23f46.3b2439aa92e0a303b80aec69acf9b1b7->hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46-top, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/ee87709a38c74570999ca6ca3e27c811.d0ad0ccf962a0de7cfa75e4891744033->hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811-top] hfiles 2024-11-13T20:05:03,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/c7616af49fb44819b5a485db43d23f46.3b2439aa92e0a303b80aec69acf9b1b7 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/ee87709a38c74570999ca6ca3e27c811.d0ad0ccf962a0de7cfa75e4891744033 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742134_1310 (size=269) 2024-11-13T20:05:03,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742134_1310 (size=269) 2024-11-13T20:05:03,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742134_1310 (size=269) 2024-11-13T20:05:03,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 
2024-11-13T20:05:03,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-13T20:05:03,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-13T20:05:03,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,848 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:03,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-13T20:05:03,850 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f in 165 msec 2024-11-13T20:05:03,850 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:03,851 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:03,852 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:03,852 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,853 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742135_1311 (size=670) 2024-11-13T20:05:03,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742135_1311 (size=670) 2024-11-13T20:05:03,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742135_1311 (size=670) 2024-11-13T20:05:03,870 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:03,875 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:03,875 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:03,876 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:03,876 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-13T20:05:03,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 211 msec 2024-11-13T20:05:03,961 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:05:03,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T20:05:03,986 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T20:05:03,986 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986 2024-11-13T20:05:03,986 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:04,022 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:04,022 DEBUG [Time-limited 
test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:04,024 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:05:04,028 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:04,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742136_1312 (size=670) 2024-11-13T20:05:04,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742136_1312 (size=670) 2024-11-13T20:05:04,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742137_1313 (size=216) 2024-11-13T20:05:04,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742136_1312 (size=670) 2024-11-13T20:05:04,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742137_1313 (size=216) 2024-11-13T20:05:04,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742137_1313 (size=216) 2024-11-13T20:05:04,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:04,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:04,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-3015452012674457916.jar 2024-11-13T20:05:05,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,122 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-4879890744436996037.jar 2024-11-13T20:05:05,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:05,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:05:05,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:05:05,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:05:05,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:05:05,193 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:05:05,193 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:05:05,193 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:05:05,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:05:05,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:05:05,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:05:05,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:05:05,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:05,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:05,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:05,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:05,196 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:05,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:05,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742138_1314 (size=131440) 2024-11-13T20:05:05,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742138_1314 (size=131440) 2024-11-13T20:05:05,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742138_1314 (size=131440) 2024-11-13T20:05:05,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742139_1315 (size=4188619) 2024-11-13T20:05:05,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742139_1315 (size=4188619) 2024-11-13T20:05:05,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742139_1315 (size=4188619) 2024-11-13T20:05:05,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742140_1316 (size=1323991) 2024-11-13T20:05:05,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742140_1316 (size=1323991) 2024-11-13T20:05:05,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742140_1316 (size=1323991) 2024-11-13T20:05:05,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742141_1317 (size=903730) 2024-11-13T20:05:05,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742141_1317 (size=903730) 2024-11-13T20:05:05,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742141_1317 (size=903730) 2024-11-13T20:05:05,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742142_1318 (size=8360083) 2024-11-13T20:05:05,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742142_1318 (size=8360083) 2024-11-13T20:05:05,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to 
blk_1073742142_1318 (size=8360083) 2024-11-13T20:05:05,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742143_1319 (size=1877034) 2024-11-13T20:05:05,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742143_1319 (size=1877034) 2024-11-13T20:05:05,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742143_1319 (size=1877034) 2024-11-13T20:05:05,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742144_1320 (size=77835) 2024-11-13T20:05:05,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742144_1320 (size=77835) 2024-11-13T20:05:05,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742144_1320 (size=77835) 2024-11-13T20:05:05,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742145_1321 (size=30949) 2024-11-13T20:05:05,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742145_1321 (size=30949) 2024-11-13T20:05:05,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742145_1321 (size=30949) 2024-11-13T20:05:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742146_1322 (size=1597327) 2024-11-13T20:05:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742146_1322 (size=1597327) 2024-11-13T20:05:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742146_1322 (size=1597327) 2024-11-13T20:05:05,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742147_1323 (size=4695811) 2024-11-13T20:05:05,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742147_1323 (size=4695811) 2024-11-13T20:05:05,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742147_1323 (size=4695811) 2024-11-13T20:05:05,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742148_1324 (size=232957) 2024-11-13T20:05:05,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742148_1324 (size=232957) 2024-11-13T20:05:05,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742148_1324 (size=232957) 2024-11-13T20:05:05,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742149_1325 (size=127628) 2024-11-13T20:05:05,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is 
added to blk_1073742149_1325 (size=127628) 2024-11-13T20:05:05,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742149_1325 (size=127628) 2024-11-13T20:05:05,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742150_1326 (size=20406) 2024-11-13T20:05:05,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742150_1326 (size=20406) 2024-11-13T20:05:05,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742150_1326 (size=20406) 2024-11-13T20:05:05,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742151_1327 (size=5175431) 2024-11-13T20:05:05,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742151_1327 (size=5175431) 2024-11-13T20:05:05,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742151_1327 (size=5175431) 2024-11-13T20:05:05,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742152_1328 (size=6424542) 2024-11-13T20:05:05,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742152_1328 (size=6424542) 2024-11-13T20:05:05,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742152_1328 (size=6424542) 2024-11-13T20:05:05,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742153_1329 (size=217634) 2024-11-13T20:05:05,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742153_1329 (size=217634) 2024-11-13T20:05:05,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742153_1329 (size=217634) 2024-11-13T20:05:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742154_1330 (size=1832290) 2024-11-13T20:05:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742154_1330 (size=1832290) 2024-11-13T20:05:05,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742154_1330 (size=1832290) 2024-11-13T20:05:05,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742155_1331 (size=322274) 2024-11-13T20:05:05,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742155_1331 (size=322274) 2024-11-13T20:05:05,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742155_1331 (size=322274) 2024-11-13T20:05:05,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42549 is added to blk_1073742156_1332 (size=503880) 2024-11-13T20:05:05,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742156_1332 (size=503880) 2024-11-13T20:05:05,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742156_1332 (size=503880) 2024-11-13T20:05:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742157_1333 (size=440392) 2024-11-13T20:05:05,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742157_1333 (size=440392) 2024-11-13T20:05:05,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742157_1333 (size=440392) 2024-11-13T20:05:05,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742158_1334 (size=29229) 2024-11-13T20:05:05,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742158_1334 (size=29229) 2024-11-13T20:05:05,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742158_1334 (size=29229) 2024-11-13T20:05:05,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742159_1335 (size=24096) 2024-11-13T20:05:05,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742159_1335 (size=24096) 2024-11-13T20:05:05,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742159_1335 (size=24096) 2024-11-13T20:05:05,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742160_1336 (size=111872) 2024-11-13T20:05:05,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742160_1336 (size=111872) 2024-11-13T20:05:05,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742160_1336 (size=111872) 2024-11-13T20:05:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742161_1337 (size=45609) 2024-11-13T20:05:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742161_1337 (size=45609) 2024-11-13T20:05:05,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742161_1337 (size=45609) 2024-11-13T20:05:05,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742162_1338 (size=136454) 2024-11-13T20:05:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742162_1338 (size=136454) 2024-11-13T20:05:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38925 is added to blk_1073742162_1338 (size=136454) 2024-11-13T20:05:05,633 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:05:05,635 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-13T20:05:05,637 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-13T20:05:05,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742163_1339 (size=378) 2024-11-13T20:05:05,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742163_1339 (size=378) 2024-11-13T20:05:05,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742163_1339 (size=378) 2024-11-13T20:05:05,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742164_1340 (size=15) 2024-11-13T20:05:05,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742164_1340 (size=15) 2024-11-13T20:05:05,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742164_1340 (size=15) 2024-11-13T20:05:05,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742165_1341 (size=303785) 2024-11-13T20:05:05,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742165_1341 (size=303785) 2024-11-13T20:05:05,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742165_1341 (size=303785) 2024-11-13T20:05:05,684 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:05:05,684 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:05:06,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0006_000001 (auth:SIMPLE) from 127.0.0.1:55256 2024-11-13T20:05:08,852 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T20:05:10,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:10,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-13T20:05:12,030 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0006_000001 (auth:SIMPLE) from 127.0.0.1:54702 2024-11-13T20:05:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742166_1342 (size=349435) 2024-11-13T20:05:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742166_1342 (size=349435) 2024-11-13T20:05:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742166_1342 (size=349435) 2024-11-13T20:05:14,361 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0006_000001 (auth:SIMPLE) from 127.0.0.1:44308 2024-11-13T20:05:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742167_1343 (size=4945) 2024-11-13T20:05:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742167_1343 (size=4945) 2024-11-13T20:05:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742167_1343 (size=4945) 2024-11-13T20:05:18,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742168_1344 (size=4945) 2024-11-13T20:05:18,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742168_1344 (size=4945) 2024-11-13T20:05:18,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742168_1344 (size=4945) 2024-11-13T20:05:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742169_1345 (size=17474) 2024-11-13T20:05:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742169_1345 (size=17474) 2024-11-13T20:05:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742169_1345 (size=17474) 2024-11-13T20:05:19,048 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000002/launch_container.sh] 2024-11-13T20:05:19,049 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000002/container_tokens] 2024-11-13T20:05:19,049 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000002/sysfs] 2024-11-13T20:05:19,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742170_1346 (size=482) 2024-11-13T20:05:19,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742170_1346 (size=482) 2024-11-13T20:05:19,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742170_1346 (size=482) 2024-11-13T20:05:19,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742171_1347 (size=17474) 2024-11-13T20:05:19,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742171_1347 (size=17474) 2024-11-13T20:05:19,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742171_1347 (size=17474) 2024-11-13T20:05:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742172_1348 (size=349435) 2024-11-13T20:05:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742172_1348 (size=349435) 2024-11-13T20:05:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742172_1348 (size=349435) 2024-11-13T20:05:21,061 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:05:21,063 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-13T20:05:21,070 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:05:21,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:05:21,070 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,071 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-13T20:05:21,071 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-13T20:05:21,071 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,072 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-13T20:05:21,072 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528303986/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-13T20:05:21,082 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T20:05:21,088 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528321088"}]},"ts":"1731528321088"} 2024-11-13T20:05:21,091 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-13T20:05:21,091 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-13T20:05:21,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-13T20:05:21,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, UNASSIGN}] 2024-11-13T20:05:21,102 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, UNASSIGN 2024-11-13T20:05:21,103 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=2bb4bf5ae8a4b752b723f73eda35b84f, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:21,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, UNASSIGN because future has completed 2024-11-13T20:05:21,106 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:21,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:21,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T20:05:21,258 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:21,258 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:21,258 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 2bb4bf5ae8a4b752b723f73eda35b84f, disabling compactions & flushes 2024-11-13T20:05:21,258 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 2024-11-13T20:05:21,258 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 
2024-11-13T20:05:21,258 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. after waiting 0 ms 2024-11-13T20:05:21,258 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 2024-11-13T20:05:21,263 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-13T20:05:21,263 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:21,264 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f. 2024-11-13T20:05:21,264 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 2bb4bf5ae8a4b752b723f73eda35b84f: Waiting for close lock at 1731528321258Running coprocessor pre-close hooks at 1731528321258Disabling compacts and flushes for region at 1731528321258Disabling writes for close at 1731528321258Writing region close event to WAL at 1731528321259 (+1 ms)Running coprocessor post-close hooks at 1731528321263 (+4 ms)Closed at 1731528321264 (+1 ms) 2024-11-13T20:05:21,265 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:21,266 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=2bb4bf5ae8a4b752b723f73eda35b84f, regionState=CLOSED 2024-11-13T20:05:21,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:21,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-13T20:05:21,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 2bb4bf5ae8a4b752b723f73eda35b84f, server=54d0784d1943,36439,1731528160987 in 162 msec 2024-11-13T20:05:21,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-13T20:05:21,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2bb4bf5ae8a4b752b723f73eda35b84f, UNASSIGN in 170 msec 2024-11-13T20:05:21,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=120, resume processing ppid=119 2024-11-13T20:05:21,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 181 msec 2024-11-13T20:05:21,276 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528321275"}]},"ts":"1731528321275"} 2024-11-13T20:05:21,277 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-13T20:05:21,277 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-13T20:05:21,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 196 msec 2024-11-13T20:05:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T20:05:21,406 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T20:05:21,407 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,409 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,410 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,413 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,414 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:21,414 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:21,416 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving 
[FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/recovered.edits] 2024-11-13T20:05:21,416 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/recovered.edits] 2024-11-13T20:05:21,417 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:21,421 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/recovered.edits] 2024-11-13T20:05:21,425 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/cf/c7616af49fb44819b5a485db43d23f46 2024-11-13T20:05:21,426 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/c7616af49fb44819b5a485db43d23f46.3b2439aa92e0a303b80aec69acf9b1b7 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/c7616af49fb44819b5a485db43d23f46.3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:21,429 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/ee87709a38c74570999ca6ca3e27c811.d0ad0ccf962a0de7cfa75e4891744033 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/cf/ee87709a38c74570999ca6ca3e27c811.d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:21,432 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/recovered.edits/8.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7/recovered.edits/8.seqid 2024-11-13T20:05:21,433 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3b2439aa92e0a303b80aec69acf9b1b7 2024-11-13T20:05:21,433 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/cf/ee87709a38c74570999ca6ca3e27c811 2024-11-13T20:05:21,440 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/recovered.edits/12.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f/recovered.edits/12.seqid 2024-11-13T20:05:21,441 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2bb4bf5ae8a4b752b723f73eda35b84f 2024-11-13T20:05:21,442 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/recovered.edits/8.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033/recovered.edits/8.seqid 2024-11-13T20:05:21,443 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d0ad0ccf962a0de7cfa75e4891744033 2024-11-13T20:05:21,443 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-13T20:05:21,449 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,454 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-13T20:05:21,458 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' 
descriptor. 2024-11-13T20:05:21,462 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,462 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-13T20:05:21,463 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528321462"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:21,465 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-13T20:05:21,465 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2bb4bf5ae8a4b752b723f73eda35b84f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f.', STARTKEY => '', ENDKEY => ''}] 2024-11-13T20:05:21,465 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-11-13T20:05:21,466 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528321465"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:21,469 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-13T20:05:21,470 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 63 msec 2024-11-13T20:05:21,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, 
quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-11-13T20:05:21,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-11-13T20:05:21,539 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:05:21,539 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:05:21,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-13T20:05:21,541 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:21,541 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T20:05:21,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-11-13T20:05:21,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-11-13T20:05:21,541 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:05:21,541 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:05:21,542 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T20:05:21,547 DEBUG [PEWorker-4 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528321547"}]},"ts":"1731528321547"} 2024-11-13T20:05:21,549 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-13T20:05:21,549 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-13T20:05:21,550 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:21,550 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:21,550 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:21,550 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:21,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-13T20:05:21,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, UNASSIGN}] 2024-11-13T20:05:21,553 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, UNASSIGN 2024-11-13T20:05:21,553 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, UNASSIGN 2024-11-13T20:05:21,554 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=49eb7d28c7639689fb92f92d4c873757, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:21,554 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(223): pid=126 updating hbase:meta row=5c9dd49fa8390391770040ff3fba2988, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:21,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, UNASSIGN because future has completed 2024-11-13T20:05:21,558 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:21,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:21,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, UNASSIGN because future has completed 2024-11-13T20:05:21,559 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:21,559 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:21,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T20:05:21,711 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:21,711 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:21,711 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 49eb7d28c7639689fb92f92d4c873757, disabling compactions & flushes 2024-11-13T20:05:21,711 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:05:21,711 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:05:21,711 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
after waiting 0 ms 2024-11-13T20:05:21,711 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 2024-11-13T20:05:21,713 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:21,713 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:21,713 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 5c9dd49fa8390391770040ff3fba2988, disabling compactions & flushes 2024-11-13T20:05:21,713 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:21,713 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:21,713 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. after waiting 0 ms 2024-11-13T20:05:21,713 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:21,715 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:21,716 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:21,716 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757. 
2024-11-13T20:05:21,716 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 49eb7d28c7639689fb92f92d4c873757: Waiting for close lock at 1731528321711Running coprocessor pre-close hooks at 1731528321711Disabling compacts and flushes for region at 1731528321711Disabling writes for close at 1731528321711Writing region close event to WAL at 1731528321712 (+1 ms)Running coprocessor post-close hooks at 1731528321716 (+4 ms)Closed at 1731528321716 2024-11-13T20:05:21,717 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:21,718 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:21,718 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988. 2024-11-13T20:05:21,718 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 5c9dd49fa8390391770040ff3fba2988: Waiting for close lock at 1731528321713Running coprocessor pre-close hooks at 1731528321713Disabling compacts and flushes for region at 1731528321713Disabling writes for close at 1731528321713Writing region close event to WAL at 1731528321714 (+1 ms)Running coprocessor post-close hooks at 1731528321718 (+4 ms)Closed at 1731528321718 2024-11-13T20:05:21,718 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:21,720 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=49eb7d28c7639689fb92f92d4c873757, regionState=CLOSED 2024-11-13T20:05:21,720 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:21,721 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=5c9dd49fa8390391770040ff3fba2988, regionState=CLOSED 2024-11-13T20:05:21,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:21,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:21,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=127 2024-11-13T20:05:21,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=127, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 49eb7d28c7639689fb92f92d4c873757, server=54d0784d1943,33333,1731528161197 in 165 msec 2024-11-13T20:05:21,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=126 2024-11-13T20:05:21,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=126, state=SUCCESS, hasLock=false; CloseRegionProcedure 5c9dd49fa8390391770040ff3fba2988, server=54d0784d1943,36439,1731528160987 in 165 msec 2024-11-13T20:05:21,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=49eb7d28c7639689fb92f92d4c873757, UNASSIGN in 173 msec 2024-11-13T20:05:21,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-13T20:05:21,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=5c9dd49fa8390391770040ff3fba2988, UNASSIGN in 174 msec 2024-11-13T20:05:21,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-13T20:05:21,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 178 msec 2024-11-13T20:05:21,731 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528321731"}]},"ts":"1731528321731"} 2024-11-13T20:05:21,733 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-13T20:05:21,733 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-13T20:05:21,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 191 msec 2024-11-13T20:05:21,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T20:05:21,865 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T20:05:21,866 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,868 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,869 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,869 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,872 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T20:05:21,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T20:05:21,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T20:05:21,877 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:21,877 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:21,877 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:21,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-13T20:05:21,880 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/recovered.edits] 2024-11-13T20:05:21,880 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/recovered.edits] 2024-11-13T20:05:21,883 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/cf/6bc8924bee1d42a6b6578583de44ef39 
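Note: as the HFileArchiver entries here and above show, dropping a table does not delete its store files outright; each region's cf and recovered.edits files are moved under the cluster's archive directory (the .../archive/data/default/<table>/... paths in these messages), where snapshots that still reference them can continue to resolve them. The following is only a hypothetical sketch of inspecting such an archived table with the HDFS FileSystem API: the NameNode URI, table name, and the class name ArchiveListingSketch are illustrative assumptions, while the family directory name cf mirrors the one in these messages.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveListingSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder archive location; in this log it is <hbase-rootdir>/archive/data/default/<table>
        Path archivedTable = new Path("hdfs://namenode:8020/hbase/archive/data/default/testtb-example");
        try (FileSystem fs = archivedTable.getFileSystem(conf)) {
          for (FileStatus region : fs.listStatus(archivedTable)) {
            // Each archived region keeps its family directories (e.g. "cf") with the moved HFiles.
            for (FileStatus file : fs.listStatus(new Path(region.getPath(), "cf"))) {
              System.out.println(file.getPath() + " (" + file.getLen() + " bytes)");
            }
          }
        }
      }
    }
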
2024-11-13T20:05:21,883 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/cf/df8b23d5148e45cf8f1366bf4570b8c2 2024-11-13T20:05:21,886 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988/recovered.edits/9.seqid 2024-11-13T20:05:21,886 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757/recovered.edits/9.seqid 2024-11-13T20:05:21,887 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:21,887 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithMergeRegion/49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:21,887 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-13T20:05:21,887 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-13T20:05:21,888 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-13T20:05:21,892 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202411131e8fdaea04524951b4765620f45e677c_49eb7d28c7639689fb92f92d4c873757 2024-11-13T20:05:21,893 DEBUG [PEWorker-3 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411138d4a18dad997421a8160e6bf4358433d_5c9dd49fa8390391770040ff3fba2988 2024-11-13T20:05:21,894 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-13T20:05:21,896 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,899 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-13T20:05:21,901 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-13T20:05:21,902 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,902 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-13T20:05:21,902 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528321902"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:21,902 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528321902"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:21,904 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:05:21,904 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5c9dd49fa8390391770040ff3fba2988, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731528299071.5c9dd49fa8390391770040ff3fba2988.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 49eb7d28c7639689fb92f92d4c873757, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731528299071.49eb7d28c7639689fb92f92d4c873757.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:05:21,904 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
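Note: the DisableTableProcedure/DeleteTableProcedure chains traced above (pids 119-130) are what the master runs when a client disables and then drops these test tables, and the snapshot deletions logged just below are the matching cleanup calls. A minimal client-side sketch of that cleanup sequence using the public Admin API follows; the table and snapshot names and the class name DropTableSketch are placeholders, not the identifiers from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-example"); // placeholder table name
          if (admin.tableExists(table)) {
            admin.disableTable(table); // master runs a DisableTableProcedure (close regions, mark DISABLED)
            admin.deleteTable(table);  // master runs a DeleteTableProcedure (archive HFiles, clear meta/ACLs)
          }
          admin.deleteSnapshot("snaptb0-example"); // placeholder snapshot name
        }
      }
    }
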
2024-11-13T20:05:21,904 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528321904"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:21,906 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-13T20:05:21,907 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,908 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 41 msec 2024-11-13T20:05:21,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-13T20:05:21,985 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,985 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T20:05:21,995 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-13T20:05:21,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:21,998 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-13T20:05:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:22,001 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-13T20:05:22,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:22,030 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=795 (was 778) Potentially hanging thread: Thread-4614 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:48164 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1064736930_1 at /127.0.0.1:57200 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:57226 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 7232) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:34241 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:49670 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34241 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1064736930_1 at /127.0.0.1:49646 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 766) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=827 (was 922), ProcessCount=20 (was 22), AvailableMemoryMB=669 (was 601) - AvailableMemoryMB LEAK? - 2024-11-13T20:05:22,030 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-13T20:05:22,058 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=795, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=827, ProcessCount=20, AvailableMemoryMB=667 2024-11-13T20:05:22,058 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-13T20:05:22,060 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:05:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:22,063 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:05:22,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-13T20:05:22,064 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:05:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T20:05:22,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742173_1349 (size=443) 2024-11-13T20:05:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742173_1349 (size=443) 2024-11-13T20:05:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742173_1349 (size=443) 2024-11-13T20:05:22,073 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b54ae02535d1d27e78ca0186c0c95ab0, NAME => 'testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD 
=> '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:22,074 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3a5d8ac785b157426666dce1adfe931e, NAME => 'testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:22,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742174_1350 (size=68) 2024-11-13T20:05:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742174_1350 (size=68) 2024-11-13T20:05:22,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742174_1350 (size=68) 2024-11-13T20:05:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742175_1351 (size=68) 2024-11-13T20:05:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742175_1351 (size=68) 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing b54ae02535d1d27e78ca0186c0c95ab0, disabling compactions & flushes 2024-11-13T20:05:22,082 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 
after waiting 0 ms 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,082 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,082 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for b54ae02535d1d27e78ca0186c0c95ab0: Waiting for close lock at 1731528322082Disabling compacts and flushes for region at 1731528322082Disabling writes for close at 1731528322082Writing region close event to WAL at 1731528322082Closed at 1731528322082 2024-11-13T20:05:22,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742175_1351 (size=68) 2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 3a5d8ac785b157426666dce1adfe931e, disabling compactions & flushes 2024-11-13T20:05:22,083 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. after waiting 0 ms 2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,083 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
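[Editor's note] The CREATE request above carries a single 'cf' family with IS_MOB => 'true' and MOB_THRESHOLD => '0', so every cell of that family is written as a MOB file. A hedged sketch of building an equivalent descriptor with the HBase 2.x client API; the split key '1' mirrors the two-region layout in the log, and all other attributes are left at their defaults:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value goes to a MOB file
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // One split key ('1') yields the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```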
2024-11-13T20:05:22,083 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3a5d8ac785b157426666dce1adfe931e: Waiting for close lock at 1731528322083Disabling compacts and flushes for region at 1731528322083Disabling writes for close at 1731528322083Writing region close event to WAL at 1731528322083Closed at 1731528322083 2024-11-13T20:05:22,084 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:05:22,084 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731528322084"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528322084"}]},"ts":"1731528322084"} 2024-11-13T20:05:22,084 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731528322084"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528322084"}]},"ts":"1731528322084"} 2024-11-13T20:05:22,089 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:05:22,090 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:05:22,090 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528322090"}]},"ts":"1731528322090"} 2024-11-13T20:05:22,092 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-13T20:05:22,092 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:05:22,093 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:05:22,093 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:05:22,093 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:05:22,093 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:05:22,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, ASSIGN}] 2024-11-13T20:05:22,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, ASSIGN 2024-11-13T20:05:22,095 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, ASSIGN 2024-11-13T20:05:22,096 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:05:22,096 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:05:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T20:05:22,246 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
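[Editor's note] The two ASSIGN subprocedures above place regions b54ae02535d1d27e78ca0186c0c95ab0 and 3a5d8ac785b157426666dce1adfe931e on separate region servers. The resulting placement is visible from a client through a RegionLocator; a small sketch under the same connection assumptions as the earlier examples:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        // Prints the encoded region name and the server it was assigned to.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```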
2024-11-13T20:05:22,247 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=3a5d8ac785b157426666dce1adfe931e, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:22,247 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=b54ae02535d1d27e78ca0186c0c95ab0, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:22,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, ASSIGN because future has completed 2024-11-13T20:05:22,250 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:22,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, ASSIGN because future has completed 2024-11-13T20:05:22,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:22,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T20:05:22,413 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,413 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,413 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => b54ae02535d1d27e78ca0186c0c95ab0, NAME => 'testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:05:22,413 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 3a5d8ac785b157426666dce1adfe931e, NAME => 'testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:05:22,413 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. service=AccessControlService 2024-11-13T20:05:22,413 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
service=AccessControlService 2024-11-13T20:05:22,414 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:22,414 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,414 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,416 INFO [StoreOpener-3a5d8ac785b157426666dce1adfe931e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,418 INFO [StoreOpener-3a5d8ac785b157426666dce1adfe931e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a5d8ac785b157426666dce1adfe931e columnFamilyName cf 2024-11-13T20:05:22,419 INFO [StoreOpener-b54ae02535d1d27e78ca0186c0c95ab0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,419 DEBUG [StoreOpener-3a5d8ac785b157426666dce1adfe931e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:22,420 INFO [StoreOpener-3a5d8ac785b157426666dce1adfe931e-1 {}] regionserver.HStore(327): Store=3a5d8ac785b157426666dce1adfe931e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:22,420 INFO [StoreOpener-b54ae02535d1d27e78ca0186c0c95ab0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b54ae02535d1d27e78ca0186c0c95ab0 columnFamilyName cf 2024-11-13T20:05:22,420 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,421 DEBUG [StoreOpener-b54ae02535d1d27e78ca0186c0c95ab0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:22,423 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,426 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,426 INFO [StoreOpener-b54ae02535d1d27e78ca0186c0c95ab0-1 {}] regionserver.HStore(327): Store=b54ae02535d1d27e78ca0186c0c95ab0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:22,426 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 
3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,426 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,426 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,427 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,427 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,428 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,428 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,429 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,429 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,431 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:22,431 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:22,431 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 3a5d8ac785b157426666dce1adfe931e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74955728, jitterRate=0.1169273853302002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:22,431 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,431 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened b54ae02535d1d27e78ca0186c0c95ab0; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59647871, jitterRate=-0.111177459359169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:22,431 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,432 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for b54ae02535d1d27e78ca0186c0c95ab0: Running coprocessor pre-open hook at 1731528322414Writing region info on filesystem at 1731528322414Initializing all the Stores at 1731528322415 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528322415Cleaning up temporary data from old regions at 1731528322428 (+13 ms)Running coprocessor post-open hooks at 1731528322431 (+3 ms)Region opened successfully at 1731528322432 (+1 ms) 2024-11-13T20:05:22,432 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 3a5d8ac785b157426666dce1adfe931e: Running coprocessor pre-open hook at 1731528322414Writing region info on filesystem at 1731528322414Initializing all the Stores at 1731528322415 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528322415Cleaning up temporary data from old regions at 1731528322426 (+11 ms)Running coprocessor post-open hooks at 1731528322431 (+5 ms)Region opened successfully at 1731528322432 (+1 ms) 2024-11-13T20:05:22,433 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e., pid=134, masterSystemTime=1731528322405 2024-11-13T20:05:22,433 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0., pid=135, masterSystemTime=1731528322407 2024-11-13T20:05:22,435 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,435 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
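[Editor's note] The CompactionConfiguration entries above report the defaults in effect for 'cf' (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, major period 604800000 ms). These values are fed by standard hbase-site.xml keys; the sketch below only names the keys that correspond to the logged values and sets them on a client Configuration for illustration (in practice they take effect on the region servers), so treat the mapping as an assumption rather than test configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major compaction period (7 days)
  }
}
```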
2024-11-13T20:05:22,437 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=3a5d8ac785b157426666dce1adfe931e, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:22,438 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,438 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=b54ae02535d1d27e78ca0186c0c95ab0, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:22,438 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:22,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:22,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=132 2024-11-13T20:05:22,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987 in 196 msec 2024-11-13T20:05:22,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=133 2024-11-13T20:05:22,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197 in 199 msec 2024-11-13T20:05:22,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, ASSIGN in 358 msec 2024-11-13T20:05:22,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=131 2024-11-13T20:05:22,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, ASSIGN in 359 msec 2024-11-13T20:05:22,456 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:05:22,456 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528322456"}]},"ts":"1731528322456"} 
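[Editor's note] At this point the ASSIGN procedures have completed and the table state in hbase:meta is about to flip to ENABLED; the repeated "Checking to see if procedure is done pid=131" lines are the master answering the client's wait on the create. A comparable client-side check, sketched with the synchronous Admin API (not the exact call the test harness makes):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(tn)) { // true once every region is assigned and open
        Thread.sleep(100);
      }
      System.out.println(tn + " enabled=" + admin.isTableEnabled(tn)); // state read from hbase:meta
    }
  }
}
```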
2024-11-13T20:05:22,458 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-13T20:05:22,459 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:05:22,459 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-13T20:05:22,462 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T20:05:22,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:22,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:22,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:22,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:22,467 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:22,467 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:22,468 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:22,468 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:22,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 407 msec 2024-11-13T20:05:22,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T20:05:22,695 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
2024-11-13T20:05:22,695 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:22,698 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-13T20:05:22,698 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,699 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:22,700 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:22,705 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:22,710 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:22,712 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T20:05:22,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528322712 (current time:1731528322712). 
2024-11-13T20:05:22,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:22,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T20:05:22,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:22,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221a8ec4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:22,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:22,714 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:22,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:22,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:22,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@492da704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:22,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:22,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,716 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:22,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a07f61d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:22,718 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:22,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:22,719 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:22,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,720 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T20:05:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46570233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:22,722 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e7fd9fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:22,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,723 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49250, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:22,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6732a53b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:22,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:22,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:22,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:22,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52874, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:05:22,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:22,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:22,728 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:22,730 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:22,730 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T20:05:22,731 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:22,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:05:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T20:05:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-13T20:05:22,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T20:05:22,733 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:22,734 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:22,740 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:22,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742176_1352 (size=170) 2024-11-13T20:05:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742176_1352 (size=170) 2024-11-13T20:05:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742176_1352 (size=170) 2024-11-13T20:05:22,769 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-11-13T20:05:22,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e}] 2024-11-13T20:05:22,774 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:22,774 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T20:05:22,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-13T20:05:22,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-13T20:05:22,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:22,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for b54ae02535d1d27e78ca0186c0c95ab0: 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 3a5d8ac785b157426666dce1adfe931e: 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742178_1354 (size=71) 2024-11-13T20:05:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742178_1354 (size=71) 2024-11-13T20:05:22,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742178_1354 (size=71) 2024-11-13T20:05:22,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 
2024-11-13T20:05:22,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-13T20:05:22,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-13T20:05:22,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,982 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:22,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742177_1353 (size=71) 2024-11-13T20:05:22,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742177_1353 (size=71) 2024-11-13T20:05:22,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742177_1353 (size=71) 2024-11-13T20:05:22,990 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 in 214 msec 2024-11-13T20:05:23,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T20:05:23,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T20:05:23,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
2024-11-13T20:05:23,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-13T20:05:23,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-13T20:05:23,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:23,386 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:23,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=136 2024-11-13T20:05:23,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e in 619 msec 2024-11-13T20:05:23,406 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:23,411 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:23,425 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:05:23,425 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:23,426 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:23,431 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:05:23,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742179_1355 (size=63) 2024-11-13T20:05:23,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742179_1355 (size=63) 2024-11-13T20:05:23,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742179_1355 (size=63) 2024-11-13T20:05:23,476 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:23,476 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:23,477 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:23,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742180_1356 (size=653) 2024-11-13T20:05:23,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742180_1356 (size=653) 2024-11-13T20:05:23,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742180_1356 (size=653) 2024-11-13T20:05:23,554 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:23,571 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:23,572 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:23,574 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:23,574 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-13T20:05:23,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 843 msec 2024-11-13T20:05:23,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T20:05:23,876 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T20:05:23,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:23,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:23,886 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:23,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-13T20:05:23,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 
2024-11-13T20:05:23,889 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:23,891 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:23,897 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:23,904 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:23,913 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T20:05:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528323913 (current time:1731528323913). 2024-11-13T20:05:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T20:05:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13f82ea0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:23,917 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:23,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:23,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:23,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e07d1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-13T20:05:23,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:23,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:23,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,920 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:23,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e961561, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:23,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:23,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:23,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:23,936 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,937 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@488dc731, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:23,946 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:23,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:23,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67510080, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:23,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,956 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:23,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dab1af4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:23,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:23,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:23,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:23,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52896, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:23,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:23,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:23,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:23,976 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:23,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:23,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,976 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:23,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:23,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T20:05:23,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-13T20:05:23,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T20:05:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-13T20:05:23,982 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T20:05:23,984 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:23,989 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:24,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742181_1357 (size=165) 2024-11-13T20:05:24,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742181_1357 (size=165) 2024-11-13T20:05:24,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742181_1357 (size=165) 2024-11-13T20:05:24,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:24,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e}] 2024-11-13T20:05:24,026 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:24,026 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:24,094 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T20:05:24,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-13T20:05:24,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-13T20:05:24,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:24,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:24,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing b54ae02535d1d27e78ca0186c0c95ab0 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-13T20:05:24,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 3a5d8ac785b157426666dce1adfe931e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-13T20:05:24,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 is 71, key is 032782573a2a74de85c5238635b048e4/cf:q/1731528323881/Put/seqid=0 2024-11-13T20:05:24,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e is 71, key is 14f1d982e322fee6dbedef3848ed394f/cf:q/1731528323884/Put/seqid=0 2024-11-13T20:05:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742183_1359 (size=8171) 2024-11-13T20:05:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742183_1359 (size=8171) 2024-11-13T20:05:24,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742183_1359 (size=8171) 2024-11-13T20:05:24,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:24,298 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:24,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/.tmp/cf/dec6f0eb145a49dbb05ac05b22ecfa21, store: [table=testtb-testExportExpiredSnapshot family=cf region=3a5d8ac785b157426666dce1adfe931e] 2024-11-13T20:05:24,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/.tmp/cf/dec6f0eb145a49dbb05ac05b22ecfa21 is 209, key is 18500aee5086dda3e4035c180215f8fa0/cf:q/1731528323884/Put/seqid=0 2024-11-13T20:05:24,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T20:05:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742182_1358 (size=5101) 2024-11-13T20:05:24,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742182_1358 (size=5101) 2024-11-13T20:05:24,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742182_1358 (size=5101) 2024-11-13T20:05:24,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:24,314 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:24,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/.tmp/cf/be2ebebf370d4cf29db1f0d1665f939b, store: 
[table=testtb-testExportExpiredSnapshot family=cf region=b54ae02535d1d27e78ca0186c0c95ab0] 2024-11-13T20:05:24,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/.tmp/cf/be2ebebf370d4cf29db1f0d1665f939b is 209, key is 05dc112c4226dcd7923529d39dc1fe893/cf:q/1731528323881/Put/seqid=0 2024-11-13T20:05:24,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742184_1360 (size=14997) 2024-11-13T20:05:24,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742184_1360 (size=14997) 2024-11-13T20:05:24,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742184_1360 (size=14997) 2024-11-13T20:05:24,330 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/.tmp/cf/dec6f0eb145a49dbb05ac05b22ecfa21 2024-11-13T20:05:24,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/.tmp/cf/dec6f0eb145a49dbb05ac05b22ecfa21 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21 2024-11-13T20:05:24,351 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21, entries=47, sequenceid=6, filesize=14.6 K 2024-11-13T20:05:24,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742185_1361 (size=5916) 2024-11-13T20:05:24,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742185_1361 (size=5916) 2024-11-13T20:05:24,354 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 3a5d8ac785b157426666dce1adfe931e in 172ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:24,354 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 3a5d8ac785b157426666dce1adfe931e: 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. for snaptb0-testExportExpiredSnapshot completed. 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21] hfiles 2024-11-13T20:05:24,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742185_1361 (size=5916) 2024-11-13T20:05:24,355 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/.tmp/cf/be2ebebf370d4cf29db1f0d1665f939b 2024-11-13T20:05:24,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/.tmp/cf/be2ebebf370d4cf29db1f0d1665f939b as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b 2024-11-13T20:05:24,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b, entries=3, sequenceid=6, filesize=5.8 K 2024-11-13T20:05:24,372 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): 
Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for b54ae02535d1d27e78ca0186c0c95ab0 in 191ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for b54ae02535d1d27e78ca0186c0c95ab0: 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. for snaptb0-testExportExpiredSnapshot completed. 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b] hfiles 2024-11-13T20:05:24,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742186_1362 (size=110) 2024-11-13T20:05:24,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742186_1362 (size=110) 2024-11-13T20:05:24,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742186_1362 (size=110) 2024-11-13T20:05:24,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
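[editor's note] The flush entries above (HRegion(2902), DefaultMobStoreFlusher, and the HMobStore "Renaming flushed file" moves into mobdir) are the server-side trace of ordinary client writes to family 'cf' of a table whose MOB threshold is 0, so every flushed value ends up in a MOB file. A minimal client-side sketch of such a write, assuming a reachable cluster and the table name shown in the log; the class name, row key and value literals are illustrative only, not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          // With MOB_THRESHOLD => '0' on family 'cf', every value written here is
          // rewritten into a MOB file under mobdir/ when the region memstore is
          // flushed, which is what the HMobStore rename entries above record.
          Put put = new Put(Bytes.toBytes("row-0"));         // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("some-value"));
          table.put(put);
        }
      }
    }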
2024-11-13T20:05:24,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-13T20:05:24,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-13T20:05:24,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:24,381 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:24,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a5d8ac785b157426666dce1adfe931e in 356 msec 2024-11-13T20:05:24,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742187_1363 (size=110) 2024-11-13T20:05:24,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742187_1363 (size=110) 2024-11-13T20:05:24,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742187_1363 (size=110) 2024-11-13T20:05:24,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 
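[editor's note] SnapshotRegionProcedure pid=141 finishing here is a child of SnapshotProcedure pid=139, which the master started at the top of this stretch in response to a client snapshot request. A minimal sketch of issuing that request through the public Admin API, assuming a reachable cluster and reusing the snapshot and table names from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot first flushes each region (the HRegion(2902)
          // entries above) and then references the resulting hfiles in the
          // snapshot manifest rather than copying data.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
                         TableName.valueOf("testtb-testExportExpiredSnapshot"),
                         SnapshotType.FLUSH);
        }
      }
    }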
2024-11-13T20:05:24,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-13T20:05:24,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-13T20:05:24,398 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:24,398 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:24,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=140, resume processing ppid=139 2024-11-13T20:05:24,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0 in 374 msec 2024-11-13T20:05:24,400 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:24,401 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:24,407 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
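[editor's note] The repeated MasterRpcServices(1377) "Checking to see if procedure is done pid=139" entries throughout this stretch are the client polling the master for completion of the snapshot it requested. A sketch of doing that check explicitly, with the Admin signatures assumed from the 2.x client API as I recall them (SnapshotDescription constructor and isSnapshotFinished); the polling interval and class name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotWaitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          SnapshotDescription snapshot = new SnapshotDescription(
              "snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
          // Poll until the master reports the snapshot finished; the blocking
          // Admin.snapshot(...) call performs an equivalent wait internally.
          while (!admin.isSnapshotFinished(snapshot)) {
            Thread.sleep(200);
          }
        }
      }
    }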
2024-11-13T20:05:24,407 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:24,407 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:24,409 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0] hfiles 2024-11-13T20:05:24,409 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:24,409 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742188_1364 (size=294) 2024-11-13T20:05:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742188_1364 (size=294) 2024-11-13T20:05:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742188_1364 (size=294) 2024-11-13T20:05:24,419 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:24,419 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,419 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742189_1365 (size=963) 2024-11-13T20:05:24,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742189_1365 (size=963) 2024-11-13T20:05:24,428 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742189_1365 (size=963) 2024-11-13T20:05:24,431 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:24,441 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:24,442 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:24,444 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:24,444 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-13T20:05:24,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 465 msec 2024-11-13T20:05:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T20:05:24,616 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T20:05:24,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:05:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-13T20:05:24,620 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:05:24,621 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-13T20:05:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T20:05:24,622 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:05:24,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742190_1366 (size=436) 2024-11-13T20:05:24,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742190_1366 (size=436) 2024-11-13T20:05:24,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742190_1366 (size=436) 2024-11-13T20:05:24,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T20:05:24,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T20:05:25,078 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 02ff9aeb72cf9b21fd2a7dfcab36dd97, NAME => 'testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:25,078 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b2c0df47f245978cdca563d59f9f3349, NAME => 'testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:25,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 
is added to blk_1073742192_1368 (size=61) 2024-11-13T20:05:25,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742191_1367 (size=61) 2024-11-13T20:05:25,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742192_1368 (size=61) 2024-11-13T20:05:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742192_1368 (size=61) 2024-11-13T20:05:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742191_1367 (size=61) 2024-11-13T20:05:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742191_1367 (size=61) 2024-11-13T20:05:25,086 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 02ff9aeb72cf9b21fd2a7dfcab36dd97, disabling compactions & flushes 2024-11-13T20:05:25,087 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. after waiting 0 ms 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing b2c0df47f245978cdca563d59f9f3349, disabling compactions & flushes 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,087 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,087 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 
2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. after waiting 0 ms 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 02ff9aeb72cf9b21fd2a7dfcab36dd97: Waiting for close lock at 1731528325087Disabling compacts and flushes for region at 1731528325087Disabling writes for close at 1731528325087Writing region close event to WAL at 1731528325087Closed at 1731528325087 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,087 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,087 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for b2c0df47f245978cdca563d59f9f3349: Waiting for close lock at 1731528325087Disabling compacts and flushes for region at 1731528325087Disabling writes for close at 1731528325087Writing region close event to WAL at 1731528325087Closed at 1731528325087 2024-11-13T20:05:25,088 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:05:25,088 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731528325088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528325088"}]},"ts":"1731528325088"} 2024-11-13T20:05:25,088 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731528325088"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528325088"}]},"ts":"1731528325088"} 2024-11-13T20:05:25,091 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
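[editor's note] CreateTableProcedure pid=142 above builds 'testExportExpiredSnapshot' from the descriptor logged with the request: a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and one split point at '1', which yields the two regions just added to hbase:meta. A minimal sketch of issuing the same kind of create through the client API, under those assumptions; descriptor attributes not set here fall back to defaults rather than matching the log exactly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)    // IS_MOB => 'true'
                  .setMobThreshold(0L)    // MOB_THRESHOLD => '0': every cell is treated as MOB
                  .setMaxVersions(1)      // VERSIONS => '1'
                  .build())
              .build();
          // One split key at '1' gives the two regions ('' -> '1', '1' -> '') seen above.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }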
2024-11-13T20:05:25,092 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:05:25,092 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528325092"}]},"ts":"1731528325092"} 2024-11-13T20:05:25,093 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-13T20:05:25,094 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:05:25,095 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:05:25,095 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:05:25,095 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:05:25,095 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:05:25,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=02ff9aeb72cf9b21fd2a7dfcab36dd97, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b2c0df47f245978cdca563d59f9f3349, ASSIGN}] 2024-11-13T20:05:25,096 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=02ff9aeb72cf9b21fd2a7dfcab36dd97, ASSIGN 2024-11-13T20:05:25,096 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b2c0df47f245978cdca563d59f9f3349, ASSIGN 2024-11-13T20:05:25,097 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=02ff9aeb72cf9b21fd2a7dfcab36dd97, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:05:25,097 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b2c0df47f245978cdca563d59f9f3349, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:05:25,236 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0006_000001 (auth:SIMPLE) from 127.0.0.1:52356 2024-11-13T20:05:25,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T20:05:25,245 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000001/launch_container.sh] 2024-11-13T20:05:25,245 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000001/container_tokens] 2024-11-13T20:05:25,245 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_3/usercache/jenkins/appcache/application_1731528172678_0006/container_1731528172678_0006_01_000001/sysfs] 2024-11-13T20:05:25,247 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
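[editor's note] The TransitRegionStateProcedure and BaseLoadBalancer entries above assign the two new regions to regionservers 54d0784d1943,33333 and 54d0784d1943,39533. Once the regions are open, a client can observe the same placement through RegionLocator; a short sketch, assuming the cluster from the log (class name illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          // One line per region: encoded name plus the server it was assigned to,
          // mirroring the OPENING/OPEN state transitions written to hbase:meta above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }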
2024-11-13T20:05:25,247 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=b2c0df47f245978cdca563d59f9f3349, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:25,247 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=02ff9aeb72cf9b21fd2a7dfcab36dd97, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:25,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b2c0df47f245978cdca563d59f9f3349, ASSIGN because future has completed 2024-11-13T20:05:25,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure b2c0df47f245978cdca563d59f9f3349, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:05:25,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=02ff9aeb72cf9b21fd2a7dfcab36dd97, ASSIGN because future has completed 2024-11-13T20:05:25,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:25,409 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,409 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,409 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 02ff9aeb72cf9b21fd2a7dfcab36dd97, NAME => 'testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:05:25,409 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => b2c0df47f245978cdca563d59f9f3349, NAME => 'testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:05:25,409 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. service=AccessControlService 2024-11-13T20:05:25,409 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 
service=AccessControlService 2024-11-13T20:05:25,410 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:25,410 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,410 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,411 INFO [StoreOpener-02ff9aeb72cf9b21fd2a7dfcab36dd97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,413 INFO [StoreOpener-02ff9aeb72cf9b21fd2a7dfcab36dd97-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02ff9aeb72cf9b21fd2a7dfcab36dd97 columnFamilyName cf 2024-11-13T20:05:25,417 DEBUG [StoreOpener-02ff9aeb72cf9b21fd2a7dfcab36dd97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:25,417 INFO [StoreOpener-02ff9aeb72cf9b21fd2a7dfcab36dd97-1 {}] regionserver.HStore(327): Store=02ff9aeb72cf9b21fd2a7dfcab36dd97/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:25,417 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,418 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,418 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,419 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,419 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,421 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,423 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:25,423 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 02ff9aeb72cf9b21fd2a7dfcab36dd97; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68648153, jitterRate=0.022937193512916565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:25,423 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,424 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 02ff9aeb72cf9b21fd2a7dfcab36dd97: Running coprocessor pre-open hook at 1731528325410Writing region info on filesystem at 1731528325410Initializing all the Stores at 
1731528325411 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528325411Cleaning up temporary data from old regions at 1731528325419 (+8 ms)Running coprocessor post-open hooks at 1731528325423 (+4 ms)Region opened successfully at 1731528325424 (+1 ms) 2024-11-13T20:05:25,425 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97., pid=146, masterSystemTime=1731528325403 2024-11-13T20:05:25,425 INFO [StoreOpener-b2c0df47f245978cdca563d59f9f3349-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,427 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,427 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,428 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=02ff9aeb72cf9b21fd2a7dfcab36dd97, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:25,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:25,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=143 2024-11-13T20:05:25,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97, server=54d0784d1943,33333,1731528161197 in 180 msec 2024-11-13T20:05:25,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=02ff9aeb72cf9b21fd2a7dfcab36dd97, ASSIGN in 337 msec 2024-11-13T20:05:25,441 INFO [StoreOpener-b2c0df47f245978cdca563d59f9f3349-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b2c0df47f245978cdca563d59f9f3349 columnFamilyName cf 2024-11-13T20:05:25,441 DEBUG [StoreOpener-b2c0df47f245978cdca563d59f9f3349-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:25,442 INFO [StoreOpener-b2c0df47f245978cdca563d59f9f3349-1 {}] regionserver.HStore(327): Store=b2c0df47f245978cdca563d59f9f3349/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:25,442 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,443 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,445 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,445 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,445 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,447 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,449 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:25,449 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened b2c0df47f245978cdca563d59f9f3349; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66970692, jitterRate=-0.0020589232444763184}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:25,450 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,450 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for b2c0df47f245978cdca563d59f9f3349: Running coprocessor pre-open hook at 1731528325410Writing region info on filesystem at 1731528325410Initializing all the Stores at 1731528325411 (+1 
ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528325411Cleaning up temporary data from old regions at 1731528325445 (+34 ms)Running coprocessor post-open hooks at 1731528325450 (+5 ms)Region opened successfully at 1731528325450 2024-11-13T20:05:25,451 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349., pid=145, masterSystemTime=1731528325402 2024-11-13T20:05:25,452 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,452 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:05:25,453 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=b2c0df47f245978cdca563d59f9f3349, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:25,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure b2c0df47f245978cdca563d59f9f3349, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:05:25,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=144 2024-11-13T20:05:25,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure b2c0df47f245978cdca563d59f9f3349, server=54d0784d1943,39533,1731528161273 in 207 msec 2024-11-13T20:05:25,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=142 2024-11-13T20:05:25,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b2c0df47f245978cdca563d59f9f3349, ASSIGN in 369 msec 2024-11-13T20:05:25,467 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:05:25,467 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528325467"}]},"ts":"1731528325467"} 2024-11-13T20:05:25,474 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-13T20:05:25,475 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:05:25,476 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-13T20:05:25,481 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T20:05:25,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:25,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:25,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:25,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:25,493 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,493 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,495 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,495 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,495 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,495 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 876 msec 2024-11-13T20:05:25,505 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data 
PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,506 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:25,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T20:05:25,756 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-13T20:05:25,756 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,759 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-13T20:05:25,759 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:25,759 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:25,761 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,767 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,774 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:25,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:25,789 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,792 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-13T20:05:25,792 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 
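(Illustrative sketch, not part of the captured test output.) The region-open journal and the put warnings above reflect a table with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW'), two regions (',,' and ',1,'), and writes issued with the WAL disabled. A client could set that up along these lines; the class name, the row/value contents, and the single split key at '1' are assumptions made for the example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family matching the descriptor logged during region open:
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW'
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .build();
      // A single split key yields the two regions seen in the log (",," and ",1,").
      admin.createTable(
          TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build(),
          new byte[][] { Bytes.toBytes("1") });
      // WAL-less put: this is what produces the "writing data ... with WAL disabled" warning.
      try (Table table = conn.getTable(tn)) {
        Put put = new Put(Bytes.toBytes("row-0"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);
      }
    }
  }
}
```

Durability.SKIP_WAL is also what causes the region servers above to log "Data may be lost in the event of a crash."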
2024-11-13T20:05:25,792 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:25,794 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,799 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T20:05:25,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-13T20:05:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T20:05:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60f5f936, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:25,808 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:25,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:25,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:25,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@143659fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:25,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:25,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,810 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49310, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:25,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c03d00c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:25,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:25,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:25,817 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:25,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,818 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
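(Illustrative sketch, not part of the captured test output.) The request logged by MasterRpcServices, { ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, is what an Admin.snapshot() call with a TTL property produces. The SnapshotDescription constructor that takes a properties map, and the "TTL" key expressed in seconds, are assumptions based on recent HBase client releases; verify against the client version actually in use.

```java
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeTtlSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // "TTL" (seconds) travels in the snapshot properties map; ttl=10 matches the
      // request logged by MasterRpcServices above.
      Map<String, Object> props = Collections.singletonMap("TTL", 10L);
      admin.snapshot(new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH,
          props));
    }
  }
}
```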
2024-11-13T20:05:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50c38b0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:25,822 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20cc560, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:25,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,824 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:25,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45f10193, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:25,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:25,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:25,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52904, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:05:25,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:25,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:25,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:25,833 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:25,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,834 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T20:05:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:05:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-13T20:05:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-13T20:05:25,837 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T20:05:25,838 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:25,840 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:25,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742193_1369 (size=152) 2024-11-13T20:05:25,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742193_1369 (size=152) 2024-11-13T20:05:25,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742193_1369 (size=152) 2024-11-13T20:05:25,851 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:25,851 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b2c0df47f245978cdca563d59f9f3349}] 2024-11-13T20:05:25,852 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:25,852 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T20:05:26,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-13T20:05:26,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:26,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing 02ff9aeb72cf9b21fd2a7dfcab36dd97 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-13T20:05:26,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-13T20:05:26,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 
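(Illustrative sketch, not part of the captured test output.) The recurring "Checking to see if procedure is done pid=147" entries are the client polling the master while SnapshotProcedure 147 and its per-region subprocedures (pid=148 and 149) run. A rough client-side equivalent, assuming the asynchronous snapshot and polling methods exposed by Admin in recent releases, looks like this:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      SnapshotDescription desc = new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
      // Submits the SnapshotProcedure on the master without blocking.
      admin.snapshotAsync(desc);
      // Each poll shows up on the master as "Checking to see if procedure is done pid=...".
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200);
      }
    }
  }
}
```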
2024-11-13T20:05:26,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing b2c0df47f245978cdca563d59f9f3349 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-13T20:05:26,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113fe86ff3415d149aba1ea9bc252faa3e5_02ff9aeb72cf9b21fd2a7dfcab36dd97 is 71, key is 0372bb428aa9e6ac65b616eff5a22a55/cf:q/1731528325784/Put/seqid=0 2024-11-13T20:05:26,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113383cbe8415b14476af9ff0ddc23c2a63_b2c0df47f245978cdca563d59f9f3349 is 71, key is 17259c09fd1ff363f32f6a143bf17f5d/cf:q/1731528325787/Put/seqid=0 2024-11-13T20:05:26,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742194_1370 (size=5032) 2024-11-13T20:05:26,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742194_1370 (size=5032) 2024-11-13T20:05:26,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742194_1370 (size=5032) 2024-11-13T20:05:26,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:26,109 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113fe86ff3415d149aba1ea9bc252faa3e5_02ff9aeb72cf9b21fd2a7dfcab36dd97 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241113fe86ff3415d149aba1ea9bc252faa3e5_02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:26,110 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/.tmp/cf/ac596ca4c5f24632af9df3da633eaf5d, store: [table=testExportExpiredSnapshot family=cf region=02ff9aeb72cf9b21fd2a7dfcab36dd97] 2024-11-13T20:05:26,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/.tmp/cf/ac596ca4c5f24632af9df3da633eaf5d is 202, key is 
0e42f8281a6afaf52f549251aac914bc6/cf:q/1731528325784/Put/seqid=0 2024-11-13T20:05:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T20:05:26,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742196_1372 (size=5688) 2024-11-13T20:05:26,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742196_1372 (size=5688) 2024-11-13T20:05:26,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742196_1372 (size=5688) 2024-11-13T20:05:26,158 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/.tmp/cf/ac596ca4c5f24632af9df3da633eaf5d 2024-11-13T20:05:26,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/.tmp/cf/ac596ca4c5f24632af9df3da633eaf5d as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/cf/ac596ca4c5f24632af9df3da633eaf5d 2024-11-13T20:05:26,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742195_1371 (size=8242) 2024-11-13T20:05:26,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742195_1371 (size=8242) 2024-11-13T20:05:26,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742195_1371 (size=8242) 2024-11-13T20:05:26,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:26,171 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/cf/ac596ca4c5f24632af9df3da633eaf5d, entries=2, sequenceid=5, filesize=5.6 K 2024-11-13T20:05:26,173 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 02ff9aeb72cf9b21fd2a7dfcab36dd97 in 168ms, sequenceid=5, compaction requested=false 2024-11-13T20:05:26,173 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 
2024-11-13T20:05:26,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for 02ff9aeb72cf9b21fd2a7dfcab36dd97: 2024-11-13T20:05:26,174 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113383cbe8415b14476af9ff0ddc23c2a63_b2c0df47f245978cdca563d59f9f3349 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241113383cbe8415b14476af9ff0ddc23c2a63_b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:26,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. for snapshot-testExportExpiredSnapshot completed. 2024-11-13T20:05:26,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:26,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/cf/ac596ca4c5f24632af9df3da633eaf5d] hfiles 2024-11-13T20:05:26,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/cf/ac596ca4c5f24632af9df3da633eaf5d for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/.tmp/cf/363324a8f20a479090b8c1378b71ae2c, store: [table=testExportExpiredSnapshot family=cf region=b2c0df47f245978cdca563d59f9f3349] 2024-11-13T20:05:26,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/.tmp/cf/363324a8f20a479090b8c1378b71ae2c is 202, key is 12ddc48713a92f96a9b7fc585aff764d2/cf:q/1731528325787/Put/seqid=0 2024-11-13T20:05:26,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42549 is added to blk_1073742198_1374 (size=14861) 2024-11-13T20:05:26,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742198_1374 (size=14861) 2024-11-13T20:05:26,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742198_1374 (size=14861) 2024-11-13T20:05:26,205 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/.tmp/cf/363324a8f20a479090b8c1378b71ae2c 2024-11-13T20:05:26,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/.tmp/cf/363324a8f20a479090b8c1378b71ae2c as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/cf/363324a8f20a479090b8c1378b71ae2c 2024-11-13T20:05:26,219 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/cf/363324a8f20a479090b8c1378b71ae2c, entries=48, sequenceid=5, filesize=14.5 K 2024-11-13T20:05:26,220 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for b2c0df47f245978cdca563d59f9f3349 in 210ms, sequenceid=5, compaction requested=false 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for b2c0df47f245978cdca563d59f9f3349: 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. for snapshot-testExportExpiredSnapshot completed. 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/cf/363324a8f20a479090b8c1378b71ae2c] hfiles 2024-11-13T20:05:26,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/cf/363324a8f20a479090b8c1378b71ae2c for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742197_1373 (size=103) 2024-11-13T20:05:26,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742197_1373 (size=103) 2024-11-13T20:05:26,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742197_1373 (size=103) 2024-11-13T20:05:26,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:05:26,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-13T20:05:26,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-13T20:05:26,262 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:26,263 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:26,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 02ff9aeb72cf9b21fd2a7dfcab36dd97 in 413 msec 2024-11-13T20:05:26,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742199_1375 (size=103) 2024-11-13T20:05:26,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742199_1375 (size=103) 2024-11-13T20:05:26,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 
2024-11-13T20:05:26,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-13T20:05:26,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-13T20:05:26,330 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:26,330 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742199_1375 (size=103) 2024-11-13T20:05:26,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=147 2024-11-13T20:05:26,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b2c0df47f245978cdca563d59f9f3349 in 480 msec 2024-11-13T20:05:26,333 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:26,334 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:26,335 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:05:26,336 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:26,336 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:26,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241113383cbe8415b14476af9ff0ddc23c2a63_b2c0df47f245978cdca563d59f9f3349, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241113fe86ff3415d149aba1ea9bc252faa3e5_02ff9aeb72cf9b21fd2a7dfcab36dd97] hfiles 2024-11-13T20:05:26,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241113383cbe8415b14476af9ff0ddc23c2a63_b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:05:26,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241113fe86ff3415d149aba1ea9bc252faa3e5_02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:05:26,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742200_1376 (size=287) 2024-11-13T20:05:26,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742200_1376 (size=287) 2024-11-13T20:05:26,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742200_1376 (size=287) 2024-11-13T20:05:26,361 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:26,361 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,362 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742201_1377 (size=935) 2024-11-13T20:05:26,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742201_1377 (size=935) 2024-11-13T20:05:26,403 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742201_1377 (size=935) 2024-11-13T20:05:26,415 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:26,430 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:26,430 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-13T20:05:26,432 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:26,432 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-13T20:05:26,434 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 597 msec 2024-11-13T20:05:26,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T20:05:26,465 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-13T20:05:26,825 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:05:30,609 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-13T20:05:30,609 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-13T20:05:30,610 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-13T20:05:30,610 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about 
Tables on a single HBase RegionServer 2024-11-13T20:05:30,611 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T20:05:30,611 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T20:05:36,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:05:36,478 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528336478 2024-11-13T20:05:36,478 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528336478, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528336478, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:36,507 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:36,507 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528336478, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528336478/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-13T20:05:36,511 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:05:36,513 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
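For illustration (not part of the captured log): the snapshot-testExportExpiredSnapshot snapshot was taken with ttl=10 (seconds) around 20:05:25, so the export attempt at 20:05:36 fails verification with SnapshotTTLExpiredException. Below is a minimal Java sketch of that kind of TTL check; the class and method names are the editor's own illustration, not the exact HBase internals in ExportSnapshot.verifySnapshot.

    // Illustrative sketch only (not HBase source): a snapshot-TTL expiry check of the
    // general form implied by the error above. Timestamps are epoch milliseconds.
    public final class SnapshotTtlCheck {
      private SnapshotTtlCheck() {}

      /** Returns true if ttlSeconds have elapsed since the snapshot's creation time. */
      public static boolean isExpired(long creationTimeMs, long ttlSeconds, long nowMs) {
        // A TTL <= 0 is conventionally treated as "never expires".
        if (ttlSeconds <= 0) {
          return false;
        }
        return nowMs > creationTimeMs + ttlSeconds * 1000L;
      }

      public static void main(String[] args) {
        long created = 1_731_528_325_000L; // approx. snapshot creation time from the log
        long now     = 1_731_528_336_511L; // approx. time of the export attempt
        System.out.println(isExpired(created, 10, now)); // prints: true
      }
    }

With a 10-second TTL, any export started more than 10 seconds after snapshot creation is expected to be rejected, which is exactly the behavior this test exercises.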
2024-11-13T20:05:36,517 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T20:05:36,523 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528336523"}]},"ts":"1731528336523"} 2024-11-13T20:05:36,525 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-13T20:05:36,526 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-13T20:05:36,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-13T20:05:36,529 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, UNASSIGN}] 2024-11-13T20:05:36,530 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, UNASSIGN 2024-11-13T20:05:36,531 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=3a5d8ac785b157426666dce1adfe931e, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:36,532 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, UNASSIGN 2024-11-13T20:05:36,534 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=b54ae02535d1d27e78ca0186c0c95ab0, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:36,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, UNASSIGN because future has completed 2024-11-13T20:05:36,534 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:36,535 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:36,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, UNASSIGN because future has completed 2024-11-13T20:05:36,542 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:36,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T20:05:36,694 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 3a5d8ac785b157426666dce1adfe931e, disabling compactions & flushes 2024-11-13T20:05:36,694 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. after waiting 0 ms 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
2024-11-13T20:05:36,694 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:36,694 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:36,695 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing b54ae02535d1d27e78ca0186c0c95ab0, disabling compactions & flushes 2024-11-13T20:05:36,695 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:36,695 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:36,695 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. after waiting 0 ms 2024-11-13T20:05:36,695 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 2024-11-13T20:05:36,702 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:36,702 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:36,702 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e. 
2024-11-13T20:05:36,702 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 3a5d8ac785b157426666dce1adfe931e: Waiting for close lock at 1731528336694Running coprocessor pre-close hooks at 1731528336694Disabling compacts and flushes for region at 1731528336694Disabling writes for close at 1731528336694Writing region close event to WAL at 1731528336695 (+1 ms)Running coprocessor post-close hooks at 1731528336702 (+7 ms)Closed at 1731528336702 2024-11-13T20:05:36,705 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:36,706 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=3a5d8ac785b157426666dce1adfe931e, regionState=CLOSED 2024-11-13T20:05:36,707 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:36,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:36,708 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:36,708 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0. 
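For illustration (not part of the captured log): the "Region close journal" entries above and below pack each close step and its epoch-millisecond timestamp into one string ("<step> at <millis>", optionally followed by "(+N ms)"). The helper below is a hypothetical, editor-written utility showing one way to split such a journal into readable step/timestamp pairs; it is not an HBase class.

    // Hypothetical helper (editor's own): split a region close/open journal string
    // such as the ones logged above into individual "step @ epochMillis" entries.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class CloseJournalParser {
      // Each step looks like "<text> at <13-digit epoch millis>", optionally "( +N ms)".
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

      public static List<String> parse(String journal) {
        List<String> steps = new ArrayList<>();
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
          steps.add(m.group(1).trim() + " @ " + m.group(2));
        }
        return steps;
      }

      public static void main(String[] args) {
        // Abridged from the journal entry above.
        String journal = "Waiting for close lock at 1731528336694"
            + "Running coprocessor pre-close hooks at 1731528336694"
            + "Writing region close event to WAL at 1731528336695 (+1 ms)"
            + "Closed at 1731528336702";
        parse(journal).forEach(System.out::println);
      }
    }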
2024-11-13T20:05:36,708 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for b54ae02535d1d27e78ca0186c0c95ab0: Waiting for close lock at 1731528336695Running coprocessor pre-close hooks at 1731528336695Disabling compacts and flushes for region at 1731528336695Disabling writes for close at 1731528336695Writing region close event to WAL at 1731528336696 (+1 ms)Running coprocessor post-close hooks at 1731528336708 (+12 ms)Closed at 1731528336708 2024-11-13T20:05:36,710 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:36,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-13T20:05:36,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 3a5d8ac785b157426666dce1adfe931e, server=54d0784d1943,33333,1731528161197 in 174 msec 2024-11-13T20:05:36,712 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=b54ae02535d1d27e78ca0186c0c95ab0, regionState=CLOSED 2024-11-13T20:05:36,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:36,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3a5d8ac785b157426666dce1adfe931e, UNASSIGN in 182 msec 2024-11-13T20:05:36,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-13T20:05:36,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure b54ae02535d1d27e78ca0186c0c95ab0, server=54d0784d1943,36439,1731528160987 in 173 msec 2024-11-13T20:05:36,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=151 2024-11-13T20:05:36,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b54ae02535d1d27e78ca0186c0c95ab0, UNASSIGN in 188 msec 2024-11-13T20:05:36,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-13T20:05:36,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 194 msec 2024-11-13T20:05:36,723 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528336723"}]},"ts":"1731528336723"} 2024-11-13T20:05:36,725 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-13T20:05:36,725 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to 
state=DISABLED 2024-11-13T20:05:36,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 208 msec 2024-11-13T20:05:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T20:05:36,845 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T20:05:36,846 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,848 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,849 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,853 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,854 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:36,854 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:36,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,857 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/recovered.edits] 2024-11-13T20:05:36,857 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/recovered.edits] 2024-11-13T20:05:36,858 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T20:05:36,858 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T20:05:36,858 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T20:05:36,858 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:36,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:36,861 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:36,861 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-13T20:05:36,862 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:36,863 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:36,864 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/cf/be2ebebf370d4cf29db1f0d1665f939b 2024-11-13T20:05:36,864 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/cf/dec6f0eb145a49dbb05ac05b22ecfa21 2024-11-13T20:05:36,867 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0/recovered.edits/9.seqid 2024-11-13T20:05:36,867 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:36,868 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e/recovered.edits/9.seqid 2024-11-13T20:05:36,868 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportExpiredSnapshot/3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:36,868 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-13T20:05:36,869 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-13T20:05:36,870 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-13T20:05:36,873 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411137f6960186db943bf9042670c1fb217cd_3a5d8ac785b157426666dce1adfe931e 2024-11-13T20:05:36,875 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e2024111318bd0aadafed4034a54a3bcacb81708c_b54ae02535d1d27e78ca0186c0c95ab0 2024-11-13T20:05:36,875 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-13T20:05:36,878 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,881 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-13T20:05:36,883 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 
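For illustration (not part of the captured log): the DisableTableProcedure (pid=150) and DeleteTableProcedure (pid=156) above, together with the snapshot deletions a few entries below, correspond to routine test cleanup driven through the public HBase Admin API. The sketch below is the editor's minimal client-side equivalent, assuming a reachable cluster configuration on the classpath; it is illustrative rather than the test's actual code.

    // Editor's sketch of the cleanup visible in the log: disable the table, delete it,
    // then delete the related snapshots. Uses only public HBase client API calls.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class TestCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // DisableTableProcedure (pid=150 above)
            }
            admin.deleteTable(table);      // DeleteTableProcedure (pid=156 above)
          }
          // Snapshot cleanup, matching the "delete name: ..." master RPCs below.
          admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
          admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
          admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
        }
      }
    }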
2024-11-13T20:05:36,884 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,884 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-13T20:05:36,885 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528336884"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:36,885 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528336884"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:36,888 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:05:36,888 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b54ae02535d1d27e78ca0186c0c95ab0, NAME => 'testtb-testExportExpiredSnapshot,,1731528322060.b54ae02535d1d27e78ca0186c0c95ab0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3a5d8ac785b157426666dce1adfe931e, NAME => 'testtb-testExportExpiredSnapshot,1,1731528322060.3a5d8ac785b157426666dce1adfe931e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:05:36,888 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-13T20:05:36,888 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528336888"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:36,890 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-13T20:05:36,891 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 45 msec 2024-11-13T20:05:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-13T20:05:36,966 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-13T20:05:36,966 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T20:05:36,976 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-13T20:05:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-13T20:05:36,979 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-13T20:05:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-13T20:05:36,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-13T20:05:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-13T20:05:37,011 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=781 (was 795), OpenFileDescriptor=765 (was 785), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=714 (was 827), ProcessCount=12 (was 20), AvailableMemoryMB=3344 (was 667) - AvailableMemoryMB LEAK? - 2024-11-13T20:05:37,011 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-11-13T20:05:37,030 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=781, OpenFileDescriptor=765, MaxFileDescriptor=1048576, SystemLoadAverage=714, ProcessCount=12, AvailableMemoryMB=3342 2024-11-13T20:05:37,030 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-11-13T20:05:37,031 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:05:37,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:37,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:05:37,034 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-13T20:05:37,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T20:05:37,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:05:37,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33293 is added to blk_1073742202_1378 (size=448) 2024-11-13T20:05:37,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742202_1378 (size=448) 2024-11-13T20:05:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742202_1378 (size=448) 2024-11-13T20:05:37,046 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 75207323ae6a8438fb6987eadd1d7fbd, NAME => 'testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:37,046 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c8ceb9a9f48fcf070601c17b39a1580c, NAME => 'testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:37,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742203_1379 (size=73) 2024-11-13T20:05:37,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742203_1379 (size=73) 2024-11-13T20:05:37,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742203_1379 (size=73) 2024-11-13T20:05:37,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742204_1380 (size=73) 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:37,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742204_1380 (size=73) 2024-11-13T20:05:37,064 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742204_1380 (size=73) 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing c8ceb9a9f48fcf070601c17b39a1580c, disabling compactions & flushes 2024-11-13T20:05:37,064 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. after waiting 0 ms 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,064 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for c8ceb9a9f48fcf070601c17b39a1580c: Waiting for close lock at 1731528337064Disabling compacts and flushes for region at 1731528337064Disabling writes for close at 1731528337064Writing region close event to WAL at 1731528337064Closed at 1731528337064 2024-11-13T20:05:37,064 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:37,065 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 75207323ae6a8438fb6987eadd1d7fbd, disabling compactions & flushes 2024-11-13T20:05:37,065 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,065 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,065 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. after waiting 0 ms 2024-11-13T20:05:37,065 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 
2024-11-13T20:05:37,065 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,065 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 75207323ae6a8438fb6987eadd1d7fbd: Waiting for close lock at 1731528337065Disabling compacts and flushes for region at 1731528337065Disabling writes for close at 1731528337065Writing region close event to WAL at 1731528337065Closed at 1731528337065 2024-11-13T20:05:37,066 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:05:37,067 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731528337067"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528337067"}]},"ts":"1731528337067"} 2024-11-13T20:05:37,067 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731528337067"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528337067"}]},"ts":"1731528337067"} 2024-11-13T20:05:37,071 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:05:37,072 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:05:37,072 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528337072"}]},"ts":"1731528337072"} 2024-11-13T20:05:37,075 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-13T20:05:37,075 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:05:37,078 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:05:37,078 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:05:37,078 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:05:37,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): 
Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:05:37,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, ASSIGN}] 2024-11-13T20:05:37,080 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, ASSIGN 2024-11-13T20:05:37,080 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, ASSIGN 2024-11-13T20:05:37,082 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:05:37,082 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:05:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T20:05:37,232 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
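For illustration (not part of the captured log): the table created above, testtb-testEmptyExportFileSystemState, uses a MOB-enabled 'cf' family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and starts with two regions split at row key '1'. The sketch below shows an equivalent create call through the Admin API as the editor understands the HBase 2.x+ client builders; it is an illustrative approximation, not the test utility's code.

    // Editor's sketch: create a MOB-enabled table pre-split at '1', roughly matching
    // the table descriptor logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cf cell is stored as MOB
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build());
          // Pre-split at '1' so the table begins with two regions, as seen in the log.
          admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }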
2024-11-13T20:05:37,233 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=75207323ae6a8438fb6987eadd1d7fbd, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:37,233 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=c8ceb9a9f48fcf070601c17b39a1580c, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:37,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, ASSIGN because future has completed 2024-11-13T20:05:37,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:05:37,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, ASSIGN because future has completed 2024-11-13T20:05:37,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:37,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T20:05:37,392 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,392 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => c8ceb9a9f48fcf070601c17b39a1580c, NAME => 'testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. service=AccessControlService 2024-11-13T20:05:37,393 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 
2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => 75207323ae6a8438fb6987eadd1d7fbd, NAME => 'testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:05:37,393 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,393 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. service=AccessControlService 2024-11-13T20:05:37,393 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:05:37,394 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,394 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:37,394 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,394 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,394 INFO [StoreOpener-c8ceb9a9f48fcf070601c17b39a1580c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,395 INFO [StoreOpener-75207323ae6a8438fb6987eadd1d7fbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,396 INFO [StoreOpener-c8ceb9a9f48fcf070601c17b39a1580c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8ceb9a9f48fcf070601c17b39a1580c columnFamilyName cf 2024-11-13T20:05:37,396 INFO [StoreOpener-75207323ae6a8438fb6987eadd1d7fbd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75207323ae6a8438fb6987eadd1d7fbd columnFamilyName cf 2024-11-13T20:05:37,397 DEBUG [StoreOpener-c8ceb9a9f48fcf070601c17b39a1580c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:37,397 DEBUG [StoreOpener-75207323ae6a8438fb6987eadd1d7fbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:37,397 INFO [StoreOpener-c8ceb9a9f48fcf070601c17b39a1580c-1 {}] regionserver.HStore(327): Store=c8ceb9a9f48fcf070601c17b39a1580c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:37,398 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,398 INFO [StoreOpener-75207323ae6a8438fb6987eadd1d7fbd-1 {}] regionserver.HStore(327): Store=75207323ae6a8438fb6987eadd1d7fbd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:37,398 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,398 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,398 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,399 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,399 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,399 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,399 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,400 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,400 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,400 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,401 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,402 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:37,403 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened c8ceb9a9f48fcf070601c17b39a1580c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63521072, jitterRate=-0.05346226692199707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:37,403 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,403 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:37,403 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for c8ceb9a9f48fcf070601c17b39a1580c: Running coprocessor pre-open hook at 1731528337393Writing region info on filesystem at 1731528337393Initializing all the Stores at 1731528337394 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528337394Cleaning up temporary data from old regions at 1731528337399 (+5 ms)Running coprocessor post-open hooks at 1731528337403 (+4 ms)Region opened successfully at 1731528337403 2024-11-13T20:05:37,404 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened 75207323ae6a8438fb6987eadd1d7fbd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62553058, jitterRate=-0.06788679957389832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:37,404 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,404 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for 75207323ae6a8438fb6987eadd1d7fbd: Running coprocessor 
pre-open hook at 1731528337394Writing region info on filesystem at 1731528337394Initializing all the Stores at 1731528337395 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528337395Cleaning up temporary data from old regions at 1731528337400 (+5 ms)Running coprocessor post-open hooks at 1731528337404 (+4 ms)Region opened successfully at 1731528337404 2024-11-13T20:05:37,405 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd., pid=161, masterSystemTime=1731528337390 2024-11-13T20:05:37,406 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,406 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,406 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c., pid=160, masterSystemTime=1731528337388 2024-11-13T20:05:37,407 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=75207323ae6a8438fb6987eadd1d7fbd, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:37,407 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,408 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 
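
For context, the two regions opened here ([ '', '1' ) and [ '1', '' )) and the single 'cf' family printed in the region-open journal (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0') correspond to a pre-split, MOB-enabled test table. A minimal client-side sketch of creating such a table with the standard HBase 2.x Admin API, assuming an ordinary client configuration; the class name and the exact builder options are illustrative assumptions, not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Single family 'cf', one version, MOB enabled with threshold 0,
          // mirroring the descriptor printed in the region-open journal above.
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setMobEnabled(true)
              .setMobThreshold(0L);
          // One split key ('1') yields the two regions seen in the log:
          // [ '', '1' ) and [ '1', '' ).
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf.build()).build(),
              splits);
        }
      }
    }
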
2024-11-13T20:05:37,408 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=c8ceb9a9f48fcf070601c17b39a1580c, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:37,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:37,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:05:37,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=158 2024-11-13T20:05:37,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987 in 177 msec 2024-11-13T20:05:37,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-13T20:05:37,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273 in 179 msec 2024-11-13T20:05:37,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, ASSIGN in 337 msec 2024-11-13T20:05:37,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-13T20:05:37,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, ASSIGN in 338 msec 2024-11-13T20:05:37,418 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:05:37,418 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528337418"}]},"ts":"1731528337418"} 2024-11-13T20:05:37,420 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-13T20:05:37,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:05:37,420 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-13T20:05:37,423 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T20:05:37,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:37,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:37,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:37,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:37,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 396 msec 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T20:05:37,666 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T20:05:37,666 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:37,668 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-13T20:05:37,668 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,668 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:37,670 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:37,675 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:37,680 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:37,683 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:05:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528337683 (current time:1731528337683). 
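
The snapshot request logged just above ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is what a client issues through the Admin API. A minimal sketch, assuming the standard HBase 2.x Admin interface; conn stands for an already-open Connection and the wrapper class name is an assumption.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotType;

    class EmptySnapshotSketch {
      // Requests the FLUSH-type snapshot described in the request logged above.
      static void takeEmptySnapshot(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }
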
2024-11-13T20:05:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-13T20:05:37,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a687967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:37,685 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:37,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:37,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:37,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39ec1000, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:37,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:37,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,687 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:37,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75ef5788, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:37,688 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:37,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:37,690 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:37,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,691 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
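
The call stack above shows the master opening a short-lived connection inside SnapshotDescriptionUtils.isSecurityAvailable, and the surrounding entries show it reading the table ACL (jenkins: RWXCA) so the permissions can be copied into the snapshot description. A client can inspect the same stored permissions; a minimal sketch assuming AccessControlClient from the HBase security API, with the wrapper class and method names being illustrative assumptions.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    class AclReadSketch {
      // AccessControlClient.getUserPermissions is declared `throws Throwable`.
      static void dumpTablePermissions(Connection conn) throws Throwable {
        List<UserPermission> perms =
            AccessControlClient.getUserPermissions(conn, "testtb-testEmptyExportFileSystemState");
        for (UserPermission p : perms) {
          System.out.println(p);  // the log above shows the owner entry "jenkins: RWXCA"
        }
      }
    }
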
2024-11-13T20:05:37,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7708f75d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:37,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:37,694 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:37,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:37,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:37,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69fc3e42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:37,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:37,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,696 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:37,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de05c97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:37,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:37,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:37,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:37,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:05:37,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:37,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:37,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36188, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:37,702 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:05:37,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:37,703 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,703 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:37,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:37,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T20:05:37,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:05:37,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:05:37,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-13T20:05:37,706 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:37,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T20:05:37,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:37,709 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:37,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742205_1381 (size=185) 2024-11-13T20:05:37,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742205_1381 (size=185) 2024-11-13T20:05:37,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742205_1381 (size=185) 2024-11-13T20:05:37,721 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:37,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c}] 2024-11-13T20:05:37,723 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,723 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T20:05:37,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-13T20:05:37,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-13T20:05:37,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for c8ceb9a9f48fcf070601c17b39a1580c: 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for 75207323ae6a8438fb6987eadd1d7fbd: 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:37,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:37,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742206_1382 (size=76) 2024-11-13T20:05:37,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742207_1383 (size=76) 2024-11-13T20:05:37,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742207_1383 (size=76) 2024-11-13T20:05:37,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742206_1382 (size=76) 2024-11-13T20:05:37,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742206_1382 (size=76) 2024-11-13T20:05:37,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:37,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-13T20:05:37,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 
2024-11-13T20:05:37,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-13T20:05:37,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742207_1383 (size=76) 2024-11-13T20:05:37,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-13T20:05:37,885 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,885 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:37,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-13T20:05:37,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,885 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:37,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c in 164 msec 2024-11-13T20:05:37,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-13T20:05:37,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd in 164 msec 2024-11-13T20:05:37,888 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:37,889 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:37,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
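
While the SnapshotProcedure walks through its states here, the client keeps polling the master (the repeated "Checking to see if procedure is done pid=162" entries). A minimal sketch of the equivalent asynchronous pattern, assuming Admin.snapshotAsync and Admin.isSnapshotFinished from the HBase 2.x client; the synchronous admin.snapshot(...) shown earlier performs this wait internally, and the poll interval below is arbitrary.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    class SnapshotPollSketch {
      static void snapshotAndWait(Admin admin) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(
            "emptySnaptb0-testEmptyExportFileSystemState",
            TableName.valueOf("testtb-testEmptyExportFileSystemState"),
            SnapshotType.FLUSH);
        admin.snapshotAsync(desc);                // submits the SnapshotProcedure on the master
        while (!admin.isSnapshotFinished(desc)) { // the repeated "Checking to see if procedure
          Thread.sleep(200);                      //  is done" lines are this kind of polling
        }
      }
    }
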
2024-11-13T20:05:37,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:37,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:37,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:05:37,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742208_1384 (size=68) 2024-11-13T20:05:37,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742208_1384 (size=68) 2024-11-13T20:05:37,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742208_1384 (size=68) 2024-11-13T20:05:37,908 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:37,908 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:37,908 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:37,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742209_1385 (size=673) 2024-11-13T20:05:37,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742209_1385 (size=673) 2024-11-13T20:05:37,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742209_1385 (size=673) 2024-11-13T20:05:37,951 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:37,956 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:37,957 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:37,959 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:37,959 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-13T20:05:37,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 255 msec 2024-11-13T20:05:38,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T20:05:38,025 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T20:05:38,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:38,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:38,034 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:38,037 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-13T20:05:38,037 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 
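
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above come from puts issued with durability SKIP_WAL. A minimal sketch of such a write, assuming the standard Table/Put client API; the column qualifier and value are placeholders, since the actual test payload is not shown in the log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class SkipWalPutSketch {
      static void loadRow(Connection conn, byte[] row) throws IOException {
        try (Table table =
                 conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
          Put put = new Put(row);
          // Qualifier and value are placeholders; the real test data is not in the log.
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what produces the "with WAL disabled. Data may be lost..."
          // warning logged by HRegion above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
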
2024-11-13T20:05:38,037 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:38,039 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:38,044 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:38,049 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T20:05:38,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:05:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528338051 (current time:1731528338051). 2024-11-13T20:05:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-13T20:05:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:38,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37e08d96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:38,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:38,054 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:38,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:38,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:38,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a56676, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,055 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:38,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66a12531, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:38,058 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:38,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 
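
A second FLUSH-type snapshot request (snaptb0-testEmptyExportFileSystemState) is being validated here, alongside the emptySnaptb0 snapshot that completed earlier. Once it finishes, a client could confirm that both exist; a minimal sketch, assuming Admin.listSnapshots() from the HBase 2.x client and an illustrative wrapper class name.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    class ListSnapshotsSketch {
      static void listTestSnapshots(Admin admin) throws IOException {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription s : snapshots) {
          if (s.getName().endsWith("testEmptyExportFileSystemState")) {
            // Once the second procedure completes, this should print both
            // emptySnaptb0-... and snaptb0-testEmptyExportFileSystemState.
            System.out.println(s.getName() + " on " + s.getTableName());
          }
        }
      }
    }
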
2024-11-13T20:05:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,059 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:38,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b21ef45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:38,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:38,061 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@621f0acc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:38,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,062 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:38,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@98c3c97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:38,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:38,064 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:38,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:38,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:38,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:38,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357. 
2024-11-13T20:05:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T20:05:38,068 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
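The entries above trace the master handling a client-initiated FLUSH snapshot request for snaptb0-testEmptyExportFileSystemState: MasterRpcServices accepts the request, SnapshotDescriptionUtils fills in the creation time, TTL, version and owner, and the table ACL (jenkins: RWXCA) is read back so it can be carried in the snapshot description. A minimal sketch of issuing the same kind of request through the public Admin API is shown below; the snapshot and table names are taken from the log, while the class name and the bare-bones error handling are illustrative only (this is not the test's own code).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Requests a FLUSH-type snapshot, matching the description logged above:
          // { ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
          admin.snapshot("snaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

The blocking snapshot() call returns once the master-side SnapshotProcedure (stored as pid=165 in the next entry) has completed.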
2024-11-13T20:05:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T20:05:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-13T20:05:38,070 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T20:05:38,071 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:38,074 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:38,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742210_1386 (size=180) 2024-11-13T20:05:38,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742210_1386 (size=180) 2024-11-13T20:05:38,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742210_1386 (size=180) 2024-11-13T20:05:38,081 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:38,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c}] 2024-11-13T20:05:38,082 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:38,082 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T20:05:38,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-13T20:05:38,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-13T20:05:38,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:38,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:38,235 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing c8ceb9a9f48fcf070601c17b39a1580c 1/1 column families, dataSize=2.74 KB heapSize=6.16 KB 2024-11-13T20:05:38,235 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing 75207323ae6a8438fb6987eadd1d7fbd 1/1 column families, dataSize=534 B heapSize=1.38 KB 2024-11-13T20:05:38,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c is 71, key is 1215e6dc86c8714892b44ff57f66d7f7/cf:q/1731528338033/Put/seqid=0 2024-11-13T20:05:38,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd is 71, key is 00e38641a8244fad8311cd749eef19fb/cf:q/1731528338030/Put/seqid=0 2024-11-13T20:05:38,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742211_1387 (size=7822) 2024-11-13T20:05:38,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742211_1387 (size=7822) 2024-11-13T20:05:38,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742211_1387 (size=7822) 2024-11-13T20:05:38,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:38,310 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:38,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/.tmp/cf/b7fe6635e7a4455197a2ba67b6b92bad, store: [table=testtb-testEmptyExportFileSystemState family=cf region=c8ceb9a9f48fcf070601c17b39a1580c] 2024-11-13T20:05:38,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/.tmp/cf/b7fe6635e7a4455197a2ba67b6b92bad is 214, key is 10e52546966fbaa7efaeb3bcf7b156d70/cf:q/1731528338033/Put/seqid=0 2024-11-13T20:05:38,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742212_1388 (size=5452) 2024-11-13T20:05:38,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742212_1388 (size=5452) 2024-11-13T20:05:38,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742212_1388 (size=5452) 2024-11-13T20:05:38,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:38,324 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:38,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/.tmp/cf/9f4ecf624cdd4ef6b5344a42dab354cd, store: [table=testtb-testEmptyExportFileSystemState family=cf 
region=75207323ae6a8438fb6987eadd1d7fbd] 2024-11-13T20:05:38,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/.tmp/cf/9f4ecf624cdd4ef6b5344a42dab354cd is 214, key is 0f0c55df72c813320d04897d501910830/cf:q/1731528338030/Put/seqid=0 2024-11-13T20:05:38,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742213_1389 (size=14189) 2024-11-13T20:05:38,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742213_1389 (size=14189) 2024-11-13T20:05:38,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742213_1389 (size=14189) 2024-11-13T20:05:38,346 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/.tmp/cf/b7fe6635e7a4455197a2ba67b6b92bad 2024-11-13T20:05:38,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/.tmp/cf/b7fe6635e7a4455197a2ba67b6b92bad as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad 2024-11-13T20:05:38,356 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad, entries=42, sequenceid=6, filesize=13.9 K 2024-11-13T20:05:38,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~2.74 KB/2802, heapSize ~6.14 KB/6288, currentSize=0 B/0 for c8ceb9a9f48fcf070601c17b39a1580c in 122ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:38,357 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for c8ceb9a9f48fcf070601c17b39a1580c: 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad] hfiles 2024-11-13T20:05:38,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742214_1390 (size=6996) 2024-11-13T20:05:38,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742214_1390 (size=6996) 2024-11-13T20:05:38,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742214_1390 (size=6996) 2024-11-13T20:05:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742215_1391 (size=115) 2024-11-13T20:05:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742215_1391 (size=115) 2024-11-13T20:05:38,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742215_1391 (size=115) 2024-11-13T20:05:38,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 
2024-11-13T20:05:38,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-13T20:05:38,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-13T20:05:38,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:38,379 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:38,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c in 299 msec 2024-11-13T20:05:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T20:05:38,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T20:05:38,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=534, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/.tmp/cf/9f4ecf624cdd4ef6b5344a42dab354cd 2024-11-13T20:05:38,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/.tmp/cf/9f4ecf624cdd4ef6b5344a42dab354cd as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd 2024-11-13T20:05:38,778 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd, entries=8, sequenceid=6, filesize=6.8 K 2024-11-13T20:05:38,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~534 B/534, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 75207323ae6a8438fb6987eadd1d7fbd in 544ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:38,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for 75207323ae6a8438fb6987eadd1d7fbd: 2024-11-13T20:05:38,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-13T20:05:38,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,779 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:38,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd] hfiles 2024-11-13T20:05:38,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742216_1392 (size=115) 2024-11-13T20:05:38,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742216_1392 (size=115) 2024-11-13T20:05:38,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742216_1392 (size=115) 2024-11-13T20:05:38,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 
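Both regions have now been flushed and their snapshot references written (pid=167 already reported success above; pid=166 does so in the entries that follow), while the handler keeps logging "Checking to see if procedure is done pid=165" because the client is polling the master for completion of the overall SnapshotProcedure. A caller can poll in a similar way by snapshot name; the sketch below is illustrative only and assumes the SnapshotDescription constructor taking name, table and type (the names come from the log, the class name is made up).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class WaitForSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          SnapshotDescription desc = new SnapshotDescription(
              "snaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
          // Poll the master until it reports the snapshot as finished. The blocking
          // Admin.snapshot() call waits in a comparable way, except that it polls by
          // procedure id, which is what produces the repeated
          // "Checking to see if procedure is done pid=165" entries in this log.
          while (!admin.isSnapshotFinished(desc)) {
            Thread.sleep(500);
          }
          System.out.println("Snapshot finished: " + desc.getName());
        }
      }
    }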
2024-11-13T20:05:38,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-13T20:05:38,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-13T20:05:38,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:38,795 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:38,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-13T20:05:38,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd in 715 msec 2024-11-13T20:05:38,798 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:38,799 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:38,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:05:38,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:38,802 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:38,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd] hfiles 2024-11-13T20:05:38,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:38,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742217_1393 (size=299) 2024-11-13T20:05:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742217_1393 (size=299) 2024-11-13T20:05:38,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742217_1393 (size=299) 2024-11-13T20:05:38,835 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:38,835 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,836 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,853 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T20:05:38,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742218_1394 (size=983) 2024-11-13T20:05:38,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742218_1394 (size=983) 2024-11-13T20:05:38,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742218_1394 (size=983) 2024-11-13T20:05:38,928 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:38,947 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:38,948 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:38,950 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:38,950 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-13T20:05:38,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 881 msec 2024-11-13T20:05:39,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T20:05:39,206 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T20:05:39,206 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206 2024-11-13T20:05:39,206 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206, 
rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:39,252 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:39,252 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:39,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:05:39,260 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742219_1395 (size=185) 2024-11-13T20:05:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742219_1395 (size=185) 2024-11-13T20:05:39,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742219_1395 (size=185) 2024-11-13T20:05:39,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742220_1396 (size=673) 2024-11-13T20:05:39,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742220_1396 (size=673) 2024-11-13T20:05:39,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742220_1396 (size=673) 2024-11-13T20:05:39,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:39,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:39,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,607 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-13T20:05:40,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-13T20:05:40,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-13T20:05:40,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-16904831825984831093.jar 2024-11-13T20:05:40,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-13657980905412245762.jar 2024-11-13T20:05:40,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,771 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:40,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 
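From 20:05:39 onward the test moves on to exporting a snapshot: ExportSnapshot verifies the source snapshot, copies its manifest into the target directory under .hbase-snapshot/.tmp, and then assembles a MapReduce job; the long run of "For class X, using jar Y" entries before and after this point is TableMapReduceUtil resolving the jars that must be shipped with that job. A hedged sketch of driving the same export programmatically follows. It assumes ExportSnapshot implements Hadoop's Tool interface and accepts the -snapshot/-copy-to flags shown in its usage; the snapshot name and destination URI are copied from the log, and the driver class name is made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the command-line form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206"
        });
        System.exit(exitCode);
      }
    }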
2024-11-13T20:05:40,772 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:05:40,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:05:40,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:05:40,773 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:05:40,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:05:40,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:05:40,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:05:40,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:05:40,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:05:40,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:05:40,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:05:40,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:40,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:40,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:40,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:40,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:40,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:40,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742221_1397 (size=131440) 2024-11-13T20:05:40,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742221_1397 (size=131440) 2024-11-13T20:05:40,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742221_1397 (size=131440) 2024-11-13T20:05:40,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T20:05:40,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T20:05:40,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T20:05:40,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T20:05:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T20:05:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T20:05:40,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742224_1400 (size=903730) 2024-11-13T20:05:40,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742224_1400 (size=903730) 2024-11-13T20:05:40,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742224_1400 (size=903730) 2024-11-13T20:05:40,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T20:05:40,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T20:05:40,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T20:05:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T20:05:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T20:05:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T20:05:40,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742227_1403 (size=77835) 2024-11-13T20:05:40,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742227_1403 (size=77835) 2024-11-13T20:05:40,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742227_1403 (size=77835) 2024-11-13T20:05:41,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742228_1404 (size=30949) 2024-11-13T20:05:41,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742228_1404 (size=30949) 2024-11-13T20:05:41,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742228_1404 (size=30949) 2024-11-13T20:05:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T20:05:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T20:05:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T20:05:41,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T20:05:41,058 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T20:05:41,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T20:05:41,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742231_1407 (size=232957) 2024-11-13T20:05:41,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742231_1407 (size=232957) 2024-11-13T20:05:41,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742231_1407 (size=232957) 2024-11-13T20:05:41,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742232_1408 (size=127628) 2024-11-13T20:05:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742232_1408 (size=127628) 2024-11-13T20:05:41,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742232_1408 (size=127628) 2024-11-13T20:05:41,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742233_1409 (size=20406) 2024-11-13T20:05:41,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742233_1409 (size=20406) 2024-11-13T20:05:41,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742233_1409 (size=20406) 2024-11-13T20:05:41,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T20:05:41,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T20:05:41,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T20:05:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742235_1411 (size=440392) 2024-11-13T20:05:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742235_1411 (size=440392) 2024-11-13T20:05:41,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742235_1411 (size=440392) 2024-11-13T20:05:41,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742236_1412 (size=217634) 2024-11-13T20:05:41,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742236_1412 (size=217634) 2024-11-13T20:05:41,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742236_1412 (size=217634) 2024-11-13T20:05:41,259 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742237_1413 (size=1832290) 2024-11-13T20:05:41,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742237_1413 (size=1832290) 2024-11-13T20:05:41,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742237_1413 (size=1832290) 2024-11-13T20:05:41,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742238_1414 (size=322274) 2024-11-13T20:05:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742238_1414 (size=322274) 2024-11-13T20:05:41,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742238_1414 (size=322274) 2024-11-13T20:05:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742239_1415 (size=503880) 2024-11-13T20:05:41,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742239_1415 (size=503880) 2024-11-13T20:05:41,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742239_1415 (size=503880) 2024-11-13T20:05:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742240_1416 (size=6424542) 2024-11-13T20:05:41,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742240_1416 (size=6424542) 2024-11-13T20:05:41,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742240_1416 (size=6424542) 2024-11-13T20:05:41,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742241_1417 (size=29229) 2024-11-13T20:05:41,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742241_1417 (size=29229) 2024-11-13T20:05:41,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742241_1417 (size=29229) 2024-11-13T20:05:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742242_1418 (size=24096) 2024-11-13T20:05:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742242_1418 (size=24096) 2024-11-13T20:05:41,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742242_1418 (size=24096) 2024-11-13T20:05:41,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742243_1419 (size=111872) 2024-11-13T20:05:41,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742243_1419 (size=111872) 2024-11-13T20:05:41,508 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742243_1419 (size=111872) 2024-11-13T20:05:41,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742244_1420 (size=45609) 2024-11-13T20:05:41,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742244_1420 (size=45609) 2024-11-13T20:05:41,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742244_1420 (size=45609) 2024-11-13T20:05:41,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742245_1421 (size=136454) 2024-11-13T20:05:41,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742245_1421 (size=136454) 2024-11-13T20:05:41,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742245_1421 (size=136454) 2024-11-13T20:05:41,542 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:05:41,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-13T20:05:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742246_1422 (size=7) 2024-11-13T20:05:41,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742246_1422 (size=7) 2024-11-13T20:05:41,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742246_1422 (size=7) 2024-11-13T20:05:41,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742247_1423 (size=10) 2024-11-13T20:05:41,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742247_1423 (size=10) 2024-11-13T20:05:41,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742247_1423 (size=10) 2024-11-13T20:05:41,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742248_1424 (size=303635) 2024-11-13T20:05:41,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742248_1424 (size=303635) 2024-11-13T20:05:41,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742248_1424 (size=303635) 2024-11-13T20:05:41,686 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:05:41,686 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:05:42,040 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0007_000001 (auth:SIMPLE) from 127.0.0.1:39274 2024-11-13T20:05:42,204 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:05:44,404 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 02ff9aeb72cf9b21fd2a7dfcab36dd97 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:05:44,404 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 75207323ae6a8438fb6987eadd1d7fbd changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:05:44,404 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b2c0df47f245978cdca563d59f9f3349 changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:05:44,404 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c8ceb9a9f48fcf070601c17b39a1580c changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:05:50,639 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0007_000001 (auth:SIMPLE) from 127.0.0.1:59738 2024-11-13T20:05:51,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742249_1425 (size=349261) 2024-11-13T20:05:51,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742249_1425 (size=349261) 2024-11-13T20:05:51,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742249_1425 (size=349261) 2024-11-13T20:05:51,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742250_1426 (size=8568) 2024-11-13T20:05:51,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742250_1426 (size=8568) 2024-11-13T20:05:51,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742250_1426 (size=8568) 2024-11-13T20:05:51,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742251_1427 (size=460) 2024-11-13T20:05:51,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742251_1427 (size=460) 2024-11-13T20:05:51,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742251_1427 (size=460) 2024-11-13T20:05:51,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742252_1428 (size=8568) 2024-11-13T20:05:51,915 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742252_1428 (size=8568) 2024-11-13T20:05:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742252_1428 (size=8568) 2024-11-13T20:05:51,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742253_1429 (size=349261) 2024-11-13T20:05:51,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742253_1429 (size=349261) 2024-11-13T20:05:51,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742253_1429 (size=349261) 2024-11-13T20:05:53,129 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:05:53,130 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T20:05:53,136 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:53,136 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:05:53,137 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:05:53,137 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:53,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-13T20:05:53,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-13T20:05:53,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:53,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-13T20:05:53,138 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528339206/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 
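The entries above show ExportSnapshot finalizing the export and the test then listing .snapshotinfo and data.manifest under both the source .hbase-snapshot directory and the export root. As an illustrative sketch only (not part of the test output): ExportSnapshot can be driven as a Hadoop Tool and the result listed afterwards; the export target URI below is a placeholder, and the snapshot name is the one exercised in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // Export the snapshot to another filesystem location. The -copy-to URI is a
        // placeholder chosen for this sketch, not a path taken from the test run.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:43181/user/jenkins/export-target"
        });
        if (rc != 0) {
          throw new IllegalStateException("ExportSnapshot exited with code " + rc);
        }

        // After a successful export the target should contain .snapshotinfo and
        // data.manifest, the two files the test lists in the log above.
        Path exported = new Path(
            "hdfs://localhost:43181/user/jenkins/export-target/.hbase-snapshot/"
            + "emptySnaptb0-testEmptyExportFileSystemState");
        FileSystem fs = exported.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(exported)) {
          System.out.println(status.getPath());
        }
      }
    }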
2024-11-13T20:05:53,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T20:05:53,154 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528353153"}]},"ts":"1731528353153"} 2024-11-13T20:05:53,157 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-13T20:05:53,157 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-13T20:05:53,158 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-13T20:05:53,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, UNASSIGN}] 2024-11-13T20:05:53,161 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, UNASSIGN 2024-11-13T20:05:53,161 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, UNASSIGN 2024-11-13T20:05:53,162 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=c8ceb9a9f48fcf070601c17b39a1580c, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:53,162 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=75207323ae6a8438fb6987eadd1d7fbd, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:05:53,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, UNASSIGN because future has completed 2024-11-13T20:05:53,164 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 
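The disable request logged above ("Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState") runs on the master as a DisableTableProcedure with per-region unassign children, while the client polls "Checking to see if procedure is done". A minimal client-side sketch that would trigger this flow, assuming a cluster reachable from the default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // disableTable submits the DisableTableProcedure on the master and waits
          // for it to finish, which corresponds to the repeated "Checking to see if
          // procedure is done pid=168" polling visible in the log.
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);
          }
        }
      }
    }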
2024-11-13T20:05:53,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:05:53,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, UNASSIGN because future has completed 2024-11-13T20:05:53,165 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:05:53,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:05:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T20:05:53,317 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:53,317 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:53,317 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing c8ceb9a9f48fcf070601c17b39a1580c, disabling compactions & flushes 2024-11-13T20:05:53,317 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:53,317 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. after waiting 0 ms 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 
2024-11-13T20:05:53,318 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing 75207323ae6a8438fb6987eadd1d7fbd, disabling compactions & flushes 2024-11-13T20:05:53,318 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. after waiting 0 ms 2024-11-13T20:05:53,318 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:53,322 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:53,323 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:05:53,323 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:53,323 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c. 
2024-11-13T20:05:53,323 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for c8ceb9a9f48fcf070601c17b39a1580c: Waiting for close lock at 1731528353317Running coprocessor pre-close hooks at 1731528353317Disabling compacts and flushes for region at 1731528353317Disabling writes for close at 1731528353318 (+1 ms)Writing region close event to WAL at 1731528353318Running coprocessor post-close hooks at 1731528353323 (+5 ms)Closed at 1731528353323 2024-11-13T20:05:53,323 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:05:53,323 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd. 2024-11-13T20:05:53,323 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for 75207323ae6a8438fb6987eadd1d7fbd: Waiting for close lock at 1731528353318Running coprocessor pre-close hooks at 1731528353318Disabling compacts and flushes for region at 1731528353318Disabling writes for close at 1731528353318Writing region close event to WAL at 1731528353319 (+1 ms)Running coprocessor post-close hooks at 1731528353323 (+4 ms)Closed at 1731528353323 2024-11-13T20:05:53,325 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:53,326 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=c8ceb9a9f48fcf070601c17b39a1580c, regionState=CLOSED 2024-11-13T20:05:53,326 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed 75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:53,326 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=75207323ae6a8438fb6987eadd1d7fbd, regionState=CLOSED 2024-11-13T20:05:53,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:05:53,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:05:53,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-13T20:05:53,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure c8ceb9a9f48fcf070601c17b39a1580c, server=54d0784d1943,39533,1731528161273 in 164 msec 2024-11-13T20:05:53,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-13T20:05:53,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 75207323ae6a8438fb6987eadd1d7fbd, server=54d0784d1943,36439,1731528160987 in 164 msec 2024-11-13T20:05:53,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c8ceb9a9f48fcf070601c17b39a1580c, UNASSIGN in 172 msec 2024-11-13T20:05:53,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-13T20:05:53,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75207323ae6a8438fb6987eadd1d7fbd, UNASSIGN in 172 msec 2024-11-13T20:05:53,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-13T20:05:53,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 176 msec 2024-11-13T20:05:53,337 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528353337"}]},"ts":"1731528353337"} 2024-11-13T20:05:53,338 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-13T20:05:53,338 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-13T20:05:53,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 190 msec 2024-11-13T20:05:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T20:05:53,475 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T20:05:53,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,478 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,479 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,484 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 
{}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,484 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:53,484 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:53,486 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/recovered.edits] 2024-11-13T20:05:53,486 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/recovered.edits] 2024-11-13T20:05:53,490 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/cf/b7fe6635e7a4455197a2ba67b6b92bad 2024-11-13T20:05:53,490 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/cf/9f4ecf624cdd4ef6b5344a42dab354cd 2024-11-13T20:05:53,493 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c/recovered.edits/9.seqid 2024-11-13T20:05:53,494 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/recovered.edits/9.seqid to 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd/recovered.edits/9.seqid 2024-11-13T20:05:53,494 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:53,494 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testEmptyExportFileSystemState/75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:53,494 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-13T20:05:53,495 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-13T20:05:53,495 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-13T20:05:53,498 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024111316e44b0f40c345b4a0ab02c3344632a8_c8ceb9a9f48fcf070601c17b39a1580c 2024-11-13T20:05:53,500 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e202411131acc5a40de80487484cf7da05c200a69_75207323ae6a8438fb6987eadd1d7fbd 2024-11-13T20:05:53,501 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-13T20:05:53,510 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,513 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-13T20:05:53,516 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 
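The entries above show DeleteTableProcedure handing the region and mob directories to HFileArchiver (files are moved under archive/ rather than removed outright, so snapshots stay restorable) before the table is dropped from hbase:meta, followed later by the snapshot deletions. A client-side sketch of that cleanup, assuming the table has already been disabled as in the previous sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableAndSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Deleting the table archives its store files (the HFileArchiver entries
          // above) and then removes its regions and state from hbase:meta.
          admin.deleteTable(table);
          // Drop the snapshots once they are no longer needed, matching the
          // 'delete name: "..."' master requests later in the log.
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }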
2024-11-13T20:05:53,521 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,521 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-13T20:05:53,522 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528353521"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:53,522 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528353521"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:53,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:05:53,525 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 75207323ae6a8438fb6987eadd1d7fbd, NAME => 'testtb-testEmptyExportFileSystemState,,1731528337031.75207323ae6a8438fb6987eadd1d7fbd.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c8ceb9a9f48fcf070601c17b39a1580c, NAME => 'testtb-testEmptyExportFileSystemState,1,1731528337031.c8ceb9a9f48fcf070601c17b39a1580c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:05:53,526 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-11-13T20:05:53,526 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528353526"}]},"ts":"9223372036854775807"} 2024-11-13T20:05:53,528 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-13T20:05:53,529 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 53 msec 2024-11-13T20:05:53,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T20:05:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T20:05:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T20:05:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T20:05:53,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:53,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:53,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-13T20:05:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:53,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:53,662 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-13T20:05:53,662 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T20:05:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:53,663 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:53,664 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:53,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:53,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-13T20:05:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:53,685 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-13T20:05:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-13T20:05:53,716 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=791 (was 781) Potentially hanging thread: Thread-5614 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:35355 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-19
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:46568 [Waiting for operation #5]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1458188864_1 at /127.0.0.1:50132 [Waiting for operation #3]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: process reaper (pid 11324)
    java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method)
    java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MoveIntermediateToDone Thread #0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:46124 [Waiting for operation #5]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41549
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MoveIntermediateToDone Thread #2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HFileArchiver-18
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MoveIntermediateToDone Thread #1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35355
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:50160 [Waiting for operation #2]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=784 (was 765) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=691 (was 714), ProcessCount=18 (was 12) - ProcessCount LEAK?
-, AvailableMemoryMB=2068 (was 3342) 2024-11-13T20:05:53,717 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-13T20:05:53,743 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=791, OpenFileDescriptor=784, MaxFileDescriptor=1048576, SystemLoadAverage=691, ProcessCount=18, AvailableMemoryMB=2065 2024-11-13T20:05:53,743 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-13T20:05:53,745 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:05:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:05:53,747 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:05:53,747 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-13T20:05:53,748 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:05:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T20:05:53,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742254_1430 (size=440) 2024-11-13T20:05:53,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742254_1430 (size=440) 2024-11-13T20:05:53,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742254_1430 (size=440) 2024-11-13T20:05:53,770 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d91fda21e751725ceff2a6bf6a3f748e, NAME => 'testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:53,771 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 299c0b531e11d4010454cb822587984b, NAME => 'testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:53,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742255_1431 (size=65) 2024-11-13T20:05:53,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742255_1431 (size=65) 2024-11-13T20:05:53,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742255_1431 (size=65) 2024-11-13T20:05:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742256_1432 (size=65) 2024-11-13T20:05:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742256_1432 (size=65) 2024-11-13T20:05:53,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742256_1432 (size=65) 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 299c0b531e11d4010454cb822587984b, disabling compactions & flushes 2024-11-13T20:05:53,799 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 
after waiting 0 ms 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:53,799 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:53,799 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 299c0b531e11d4010454cb822587984b: Waiting for close lock at 1731528353799Disabling compacts and flushes for region at 1731528353799Disabling writes for close at 1731528353799Writing region close event to WAL at 1731528353799Closed at 1731528353799 2024-11-13T20:05:53,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T20:05:54,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing d91fda21e751725ceff2a6bf6a3f748e, disabling compactions & flushes 2024-11-13T20:05:54,198 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. after waiting 0 ms 2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,198 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 
2024-11-13T20:05:54,198 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for d91fda21e751725ceff2a6bf6a3f748e: Waiting for close lock at 1731528354198Disabling compacts and flushes for region at 1731528354198Disabling writes for close at 1731528354198Writing region close event to WAL at 1731528354198Closed at 1731528354198 2024-11-13T20:05:54,199 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:05:54,200 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528354199"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528354199"}]},"ts":"1731528354199"} 2024-11-13T20:05:54,200 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731528354199"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528354199"}]},"ts":"1731528354199"} 2024-11-13T20:05:54,202 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:05:54,203 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:05:54,203 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528354203"}]},"ts":"1731528354203"} 2024-11-13T20:05:54,205 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-13T20:05:54,205 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:05:54,206 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:05:54,206 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:05:54,206 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:05:54,206 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:05:54,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, ASSIGN}] 2024-11-13T20:05:54,208 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, ASSIGN 2024-11-13T20:05:54,208 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, ASSIGN 2024-11-13T20:05:54,209 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:05:54,209 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, ASSIGN; state=OFFLINE, location=54d0784d1943,39533,1731528161273; forceNewPlan=false, retain=false 2024-11-13T20:05:54,360 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T20:05:54,360 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=d91fda21e751725ceff2a6bf6a3f748e, regionState=OPENING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:54,361 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=299c0b531e11d4010454cb822587984b, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:54,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, ASSIGN because future has completed 2024-11-13T20:05:54,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, ASSIGN because future has completed 2024-11-13T20:05:54,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:05:54,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:05:54,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T20:05:54,541 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,541 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => d91fda21e751725ceff2a6bf6a3f748e, NAME => 'testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:05:54,541 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. service=AccessControlService 2024-11-13T20:05:54,542 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:05:54,542 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,542 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:54,542 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,542 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,544 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:54,544 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 299c0b531e11d4010454cb822587984b, NAME => 'testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:05:54,545 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. service=AccessControlService 2024-11-13T20:05:54,545 INFO [StoreOpener-d91fda21e751725ceff2a6bf6a3f748e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,545 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T20:05:54,545 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,545 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:05:54,545 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,545 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,546 INFO [StoreOpener-d91fda21e751725ceff2a6bf6a3f748e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d91fda21e751725ceff2a6bf6a3f748e columnFamilyName cf 2024-11-13T20:05:54,547 INFO [StoreOpener-299c0b531e11d4010454cb822587984b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,547 DEBUG [StoreOpener-d91fda21e751725ceff2a6bf6a3f748e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:54,548 INFO [StoreOpener-d91fda21e751725ceff2a6bf6a3f748e-1 {}] regionserver.HStore(327): Store=d91fda21e751725ceff2a6bf6a3f748e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:54,548 INFO [StoreOpener-299c0b531e11d4010454cb822587984b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 299c0b531e11d4010454cb822587984b columnFamilyName cf 2024-11-13T20:05:54,548 DEBUG 
[RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,548 DEBUG [StoreOpener-299c0b531e11d4010454cb822587984b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,549 INFO [StoreOpener-299c0b531e11d4010454cb822587984b-1 {}] regionserver.HStore(327): Store=299c0b531e11d4010454cb822587984b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,549 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,550 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,550 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,550 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,551 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,552 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,553 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 
{event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:54,553 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened d91fda21e751725ceff2a6bf6a3f748e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67063383, jitterRate=-6.777197122573853E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:54,553 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,554 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for d91fda21e751725ceff2a6bf6a3f748e: Running coprocessor pre-open hook at 1731528354542Writing region info on filesystem at 1731528354542Initializing all the Stores at 1731528354544 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528354544Cleaning up temporary data from old regions at 1731528354549 (+5 ms)Running coprocessor post-open hooks at 1731528354553 (+4 ms)Region opened successfully at 1731528354554 (+1 ms) 2024-11-13T20:05:54,555 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e., pid=178, masterSystemTime=1731528354536 2024-11-13T20:05:54,557 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,557 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 
2024-11-13T20:05:54,558 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=d91fda21e751725ceff2a6bf6a3f748e, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:05:54,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:05:54,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=176 2024-11-13T20:05:54,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273 in 192 msec 2024-11-13T20:05:54,566 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:05:54,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, ASSIGN in 359 msec 2024-11-13T20:05:54,573 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 299c0b531e11d4010454cb822587984b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63101265, jitterRate=-0.05971787869930267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:05:54,573 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:54,574 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 299c0b531e11d4010454cb822587984b: Running coprocessor pre-open hook at 1731528354545Writing region info on filesystem at 1731528354545Initializing all the Stores at 1731528354546 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528354546Cleaning up temporary data from old regions at 1731528354550 (+4 ms)Running coprocessor post-open hooks at 1731528354573 (+23 ms)Region opened successfully at 1731528354574 (+1 ms) 2024-11-13T20:05:54,581 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b., pid=179, masterSystemTime=1731528354539 2024-11-13T20:05:54,588 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, 
pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:54,589 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:54,589 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=299c0b531e11d4010454cb822587984b, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:05:54,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:05:54,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-13T20:05:54,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197 in 220 msec 2024-11-13T20:05:54,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=177, resume processing ppid=175 2024-11-13T20:05:54,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, ASSIGN in 388 msec 2024-11-13T20:05:54,597 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:05:54,597 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528354597"}]},"ts":"1731528354597"} 2024-11-13T20:05:54,602 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-13T20:05:54,602 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:05:54,602 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-13T20:05:54,605 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T20:05:54,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:54,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:54,607 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:54,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:05:54,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,614 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T20:05:54,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 869 msec 2024-11-13T20:05:54,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T20:05:54,885 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T20:05:54,885 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 
with caching=100 2024-11-13T20:05:54,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-13T20:05:54,889 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:54,889 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:54,891 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:54,897 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:54,906 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:54,914 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T20:05:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528354914 (current time:1731528354914). 2024-11-13T20:05:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-13T20:05:54,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:54,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a862483, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:54,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:54,921 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:54,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:54,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 
2024-11-13T20:05:54,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@225dd94f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:54,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:54,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,927 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35994, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:54,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2bd0f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:54,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:54,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:54,931 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:54,933 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:54,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:54,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,934 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:54,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dc461c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:54,938 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@532ea4d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:54,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,940 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:54,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e08cfa0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:54,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:54,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:54,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:54,943 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:54,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:54,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:54,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:54,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:54,947 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:54,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T20:05:54,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
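[The call stack above is the master closing the short-lived connection it opened while validating the snapshot request: PermissionStorage reads the table's ACL from hbase:acl (the "Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]" line) and writeAclToSnapshotDescription embeds it in the snapshot description before the procedure is stored. For reference only, the same permissions can be inspected from a client with AccessControlClient; this is an illustrative sketch, with the connection setup assumed and not taken from the test itself:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcl {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Fetch the permissions recorded for the table (backed by the hbase:acl table seen in the log).
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
          // With the grant shown above this prints an entry equivalent to "jenkins: RWXCA".
          perms.forEach(System.out::println);
        }
      }
    }
]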
2024-11-13T20:05:54,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T20:05:54,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-13T20:05:54,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T20:05:54,950 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:54,952 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:54,958 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:54,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742257_1433 (size=161) 2024-11-13T20:05:54,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742257_1433 (size=161) 2024-11-13T20:05:54,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742257_1433 (size=161) 2024-11-13T20:05:54,972 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:54,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b}] 2024-11-13T20:05:54,973 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:54,973 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:55,055 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T20:05:55,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-13T20:05:55,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-13T20:05:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 299c0b531e11d4010454cb822587984b: 2024-11-13T20:05:55,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. for emptySnaptb0-testExportWithChecksum completed. 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for d91fda21e751725ceff2a6bf6a3f748e: 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. for emptySnaptb0-testExportWithChecksum completed. 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:55,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:05:55,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742258_1434 (size=68) 2024-11-13T20:05:55,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742258_1434 (size=68) 2024-11-13T20:05:55,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:55,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-13T20:05:55,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742259_1435 (size=68) 2024-11-13T20:05:55,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742259_1435 (size=68) 2024-11-13T20:05:55,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742259_1435 (size=68) 2024-11-13T20:05:55,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742258_1434 (size=68) 2024-11-13T20:05:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-13T20:05:55,145 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:55,146 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:55,148 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b in 175 msec 2024-11-13T20:05:55,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T20:05:55,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 
2024-11-13T20:05:55,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-13T20:05:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-13T20:05:55,546 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:55,546 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:55,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-13T20:05:55,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e in 575 msec 2024-11-13T20:05:55,549 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:55,550 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:55,551 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:05:55,551 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:55,551 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:55,552 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:05:55,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742260_1436 (size=60) 2024-11-13T20:05:55,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742260_1436 (size=60) 2024-11-13T20:05:55,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742260_1436 (size=60) 2024-11-13T20:05:55,573 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:55,574 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-13T20:05:55,574 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-13T20:05:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T20:05:55,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742261_1437 (size=641) 2024-11-13T20:05:55,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742261_1437 (size=641) 2024-11-13T20:05:55,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742261_1437 (size=641) 2024-11-13T20:05:55,639 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:55,647 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:55,648 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-13T20:05:55,650 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:55,650 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-13T20:05:55,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 702 msec 2024-11-13T20:05:56,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T20:05:56,085 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T20:05:56,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:56,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:05:56,100 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:56,103 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-13T20:05:56,103 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 
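[At this point the client has observed "Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed" for the empty snapshot, and the test then loads rows with the WAL switched off, which is what produces the "Data may be lost in the event of a crash" warnings above. A minimal client-side sketch of those two steps, using the snapshot and table names from the log but with row and value contents assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SnapshotThenWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          // Flush-type snapshot of the still-empty table; this call blocks until the
          // SnapshotProcedure (pid=180 in the log above) has finished.
          admin.snapshot("emptySnaptb0-testExportWithChecksum", tn);

          // Write a row without the WAL, which is what triggers the
          // "writing data to region ... with WAL disabled" warning on the region server.
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
]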
2024-11-13T20:05:56,103 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:05:56,105 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:56,112 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:56,121 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T20:05:56,133 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T20:05:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528356133 (current time:1731528356133). 2024-11-13T20:05:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:05:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-13T20:05:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:05:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aaf81a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:56,137 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:56,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:56,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:56,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b4d71f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-13T20:05:56,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:56,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:56,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,139 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:56,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71b64a46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:56,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:56,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:56,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:56,143 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:56,145 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:56,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:56,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,146 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:56,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59a72025, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:56,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:05:56,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:05:56,147 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:05:56,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:05:56,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:05:56,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543ebf51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:56,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:05:56,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:05:56,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,148 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:05:56,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58d27fba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:05:56,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:05:56,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:05:56,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:56,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:56,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:05:56,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:05:56,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49172, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:05:56,155 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:05:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:05:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:05:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T20:05:56,156 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:05:56,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
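[The "Checking to see if procedure is done pid=..." lines scattered through this section (pid=180 above, pid=183 below) are the client polling MasterRpcServices until the SnapshotProcedure reaches SUCCESS; the synchronous Admin.snapshot call hides that loop. A comparable explicit check from the client side is sketched below, purely for illustration: it assumes the snapshot request has already been submitted, and the poll interval is arbitrary rather than anything taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class PollSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        SnapshotDescription snap = new SnapshotDescription(
            "snaptb0-testExportWithChecksum",
            TableName.valueOf("testtb-testExportWithChecksum"));
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Client-side analogue of the repeated "is procedure done" checks in the log:
          // ask the master whether the named snapshot has completed, then back off briefly.
          while (!admin.isSnapshotFinished(snap)) {
            Thread.sleep(200);
          }
        }
      }
    }
]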
2024-11-13T20:05:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T20:05:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-13T20:05:56,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T20:05:56,184 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:05:56,186 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:05:56,189 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:05:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742262_1438 (size=156) 2024-11-13T20:05:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742262_1438 (size=156) 2024-11-13T20:05:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742262_1438 (size=156) 2024-11-13T20:05:56,213 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:05:56,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b}] 2024-11-13T20:05:56,215 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:56,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:56,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T20:05:56,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-13T20:05:56,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:05:56,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 299c0b531e11d4010454cb822587984b 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-13T20:05:56,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39533 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-13T20:05:56,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:05:56,368 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing d91fda21e751725ceff2a6bf6a3f748e 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-13T20:05:56,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e is 71, key is 017dc77ed2b6d9b12d1e1b61a7b75ae8/cf:q/1731528356098/Put/seqid=0 2024-11-13T20:05:56,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b is 71, key is 1c5ff627aad6202b27fd86b413f89f35/cf:q/1731528356095/Put/seqid=0 2024-11-13T20:05:56,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742264_1440 (size=7892) 2024-11-13T20:05:56,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742264_1440 (size=7892) 2024-11-13T20:05:56,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742264_1440 (size=7892) 2024-11-13T20:05:56,411 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:56,416 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b 2024-11-13T20:05:56,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/.tmp/cf/ae80db9ea1f84b62b00c600e5943bd0d, store: [table=testtb-testExportWithChecksum family=cf region=299c0b531e11d4010454cb822587984b] 2024-11-13T20:05:56,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/.tmp/cf/ae80db9ea1f84b62b00c600e5943bd0d is 206, key is 122c1bc34f67e23689b5acc87b778b292/cf:q/1731528356095/Put/seqid=0 2024-11-13T20:05:56,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742263_1439 (size=5381) 2024-11-13T20:05:56,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742263_1439 (size=5381) 2024-11-13T20:05:56,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742263_1439 (size=5381) 2024-11-13T20:05:56,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:56,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/.tmp/cf/788f6ded517f4cbfb03549065f2d1d0e, store: [table=testtb-testExportWithChecksum family=cf region=d91fda21e751725ceff2a6bf6a3f748e] 2024-11-13T20:05:56,442 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/.tmp/cf/788f6ded517f4cbfb03549065f2d1d0e is 206, key is 0562c13263c853b286897391ce68094da/cf:q/1731528356098/Put/seqid=0 2024-11-13T20:05:56,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742265_1441 (size=14047) 2024-11-13T20:05:56,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742265_1441 (size=14047) 2024-11-13T20:05:56,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742265_1441 (size=14047) 2024-11-13T20:05:56,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/.tmp/cf/ae80db9ea1f84b62b00c600e5943bd0d 2024-11-13T20:05:56,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/.tmp/cf/ae80db9ea1f84b62b00c600e5943bd0d as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d 2024-11-13T20:05:56,461 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d, entries=43, sequenceid=6, filesize=13.7 K 2024-11-13T20:05:56,463 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 299c0b531e11d4010454cb822587984b in 95ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:56,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-13T20:05:56,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 299c0b531e11d4010454cb822587984b: 2024-11-13T20:05:56,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. for snaptb0-testExportWithChecksum completed. 
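[The mobdir/.tmp writes, the HMobStore "FLUSH Renaming flushed file" lines and the DefaultMobStoreFlusher output above appear because the table's 'cf' family is MOB-enabled: flushed cells above the MOB threshold go into a separate mob file that the snapshot manifest will reference alongside the regular store file. A sketch of declaring such a family follows; the threshold value is an assumption for illustration and is not taken from this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableDescriptor {
      public static void main(String[] args) {
        // MOB-enabled 'cf' family: qualifying cells are flushed to mob files under /mobdir,
        // which is what the DefaultMobStoreFlusher lines in the log show.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L)   // assumed threshold; the test's real setting is not in this log
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setColumnFamily(cf)
            .build();
        System.out.println(td);
      }
    }
]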
2024-11-13T20:05:56,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-13T20:05:56,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:56,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d] hfiles 2024-11-13T20:05:56,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d for snapshot=snaptb0-testExportWithChecksum 2024-11-13T20:05:56,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742266_1442 (size=6720) 2024-11-13T20:05:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742266_1442 (size=6720) 2024-11-13T20:05:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742266_1442 (size=6720) 2024-11-13T20:05:56,476 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/.tmp/cf/788f6ded517f4cbfb03549065f2d1d0e 2024-11-13T20:05:56,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742267_1443 (size=107) 2024-11-13T20:05:56,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742267_1443 (size=107) 2024-11-13T20:05:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742267_1443 (size=107) 2024-11-13T20:05:56,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 
2024-11-13T20:05:56,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/.tmp/cf/788f6ded517f4cbfb03549065f2d1d0e as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e 2024-11-13T20:05:56,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-13T20:05:56,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-13T20:05:56,486 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:56,486 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b 2024-11-13T20:05:56,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 299c0b531e11d4010454cb822587984b in 274 msec 2024-11-13T20:05:56,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T20:05:56,495 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e, entries=7, sequenceid=6, filesize=6.6 K 2024-11-13T20:05:56,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for d91fda21e751725ceff2a6bf6a3f748e in 129ms, sequenceid=6, compaction requested=false 2024-11-13T20:05:56,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for d91fda21e751725ceff2a6bf6a3f748e: 2024-11-13T20:05:56,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. for snaptb0-testExportWithChecksum completed. 2024-11-13T20:05:56,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-13T20:05:56,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:05:56,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e] hfiles 2024-11-13T20:05:56,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e for snapshot=snaptb0-testExportWithChecksum 2024-11-13T20:05:56,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742268_1444 (size=107) 2024-11-13T20:05:56,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742268_1444 (size=107) 2024-11-13T20:05:56,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742268_1444 (size=107) 2024-11-13T20:05:56,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 
2024-11-13T20:05:56,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-13T20:05:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-13T20:05:56,508 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:56,508 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:56,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=183 2024-11-13T20:05:56,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d91fda21e751725ceff2a6bf6a3f748e in 296 msec 2024-11-13T20:05:56,511 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:05:56,512 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:05:56,513 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T20:05:56,513 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:05:56,513 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:05:56,515 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e] hfiles 2024-11-13T20:05:56,515 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b 2024-11-13T20:05:56,515 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:05:56,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742269_1445 (size=291) 2024-11-13T20:05:56,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742269_1445 (size=291) 2024-11-13T20:05:56,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742269_1445 (size=291) 2024-11-13T20:05:56,529 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:05:56,529 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-13T20:05:56,530 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T20:05:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742270_1446 (size=951) 2024-11-13T20:05:56,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742270_1446 (size=951) 2024-11-13T20:05:56,564 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742270_1446 (size=951) 2024-11-13T20:05:56,572 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:05:56,578 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:05:56,578 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T20:05:56,580 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:05:56,580 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-13T20:05:56,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 419 msec 2024-11-13T20:05:56,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T20:05:56,805 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T20:05:56,806 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805 2024-11-13T20:05:56,806 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:05:56,857 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 
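
The ExportSnapshot(1085)/(1086) DEBUG entries around this point record the export's source (inputFs/inputRoot on HDFS) and its local file: destination. A hedged sketch of how such an export is typically launched through ToolRunner follows, consistent with the AbstractHBaseTool/ToolRunner frames in the failure stack traces further down; the destination path and class name below are illustrative, not the test's actual values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotLaunchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot manifest plus the referenced hfiles from the source
    // cluster's root directory to the given target filesystem/directory.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // illustrative local target
    });
    System.exit(rc);
  }
}
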
2024-11-13T20:05:56,857 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@201695cb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T20:05:56,860 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:05:56,870 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T20:05:56,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:56,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:56,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,137 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0007_000001 (auth:SIMPLE) from 127.0.0.1:42066 2024-11-13T20:05:58,186 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0007/container_1731528172678_0007_01_000001/launch_container.sh] 2024-11-13T20:05:58,186 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0007/container_1731528172678_0007_01_000001/container_tokens] 2024-11-13T20:05:58,186 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0007/container_1731528172678_0007_01_000001/sysfs] 2024-11-13T20:05:58,244 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-5536037080449213026.jar 2024-11-13T20:05:58,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-15447442268481572988.jar 2024-11-13T20:05:58,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:05:58,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:05:58,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:05:58,336 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:05:58,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:05:58,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:05:58,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:05:58,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:05:58,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:05:58,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:05:58,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:05:58,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:05:58,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:58,340 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:05:58,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742271_1447 (size=131440) 2024-11-13T20:05:58,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742271_1447 (size=131440) 2024-11-13T20:05:58,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742271_1447 (size=131440) 2024-11-13T20:05:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T20:05:58,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T20:05:58,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T20:05:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742273_1449 (size=6424542) 2024-11-13T20:05:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742273_1449 (size=6424542) 2024-11-13T20:05:58,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742273_1449 (size=6424542) 2024-11-13T20:05:58,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742274_1450 (size=1323991) 2024-11-13T20:05:58,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742274_1450 (size=1323991) 2024-11-13T20:05:58,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742274_1450 (size=1323991) 
2024-11-13T20:05:58,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742275_1451 (size=903730) 2024-11-13T20:05:58,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742275_1451 (size=903730) 2024-11-13T20:05:58,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742275_1451 (size=903730) 2024-11-13T20:05:58,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742276_1452 (size=8360083) 2024-11-13T20:05:58,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742276_1452 (size=8360083) 2024-11-13T20:05:58,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742276_1452 (size=8360083) 2024-11-13T20:05:58,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T20:05:58,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T20:05:58,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T20:05:58,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742278_1454 (size=77835) 2024-11-13T20:05:58,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742278_1454 (size=77835) 2024-11-13T20:05:58,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742278_1454 (size=77835) 2024-11-13T20:05:58,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742279_1455 (size=30949) 2024-11-13T20:05:58,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742279_1455 (size=30949) 2024-11-13T20:05:58,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742279_1455 (size=30949) 2024-11-13T20:05:58,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742280_1456 (size=440392) 2024-11-13T20:05:58,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742280_1456 (size=440392) 2024-11-13T20:05:58,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742280_1456 (size=440392) 2024-11-13T20:05:58,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742281_1457 (size=1597327) 2024-11-13T20:05:58,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742281_1457 
(size=1597327) 2024-11-13T20:05:58,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742281_1457 (size=1597327) 2024-11-13T20:05:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742282_1458 (size=4695811) 2024-11-13T20:05:58,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742282_1458 (size=4695811) 2024-11-13T20:05:58,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742282_1458 (size=4695811) 2024-11-13T20:05:58,860 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:05:58,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742283_1459 (size=232957) 2024-11-13T20:05:58,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742283_1459 (size=232957) 2024-11-13T20:05:58,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742283_1459 (size=232957) 2024-11-13T20:05:58,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742284_1460 (size=127628) 2024-11-13T20:05:58,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742284_1460 (size=127628) 2024-11-13T20:05:58,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742284_1460 (size=127628) 2024-11-13T20:05:58,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742285_1461 (size=20406) 2024-11-13T20:05:58,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742285_1461 (size=20406) 2024-11-13T20:05:58,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742285_1461 (size=20406) 2024-11-13T20:05:59,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742286_1462 (size=5175431) 2024-11-13T20:05:59,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742286_1462 (size=5175431) 2024-11-13T20:05:59,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742286_1462 (size=5175431) 2024-11-13T20:05:59,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742287_1463 (size=217634) 2024-11-13T20:05:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742287_1463 (size=217634) 2024-11-13T20:05:59,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to 
blk_1073742287_1463 (size=217634) 2024-11-13T20:05:59,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T20:05:59,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T20:05:59,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T20:05:59,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742289_1465 (size=322274) 2024-11-13T20:05:59,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742289_1465 (size=322274) 2024-11-13T20:05:59,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742289_1465 (size=322274) 2024-11-13T20:05:59,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742290_1466 (size=503880) 2024-11-13T20:05:59,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742290_1466 (size=503880) 2024-11-13T20:05:59,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742290_1466 (size=503880) 2024-11-13T20:05:59,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742291_1467 (size=29229) 2024-11-13T20:05:59,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742291_1467 (size=29229) 2024-11-13T20:05:59,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742291_1467 (size=29229) 2024-11-13T20:05:59,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742292_1468 (size=24096) 2024-11-13T20:05:59,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742292_1468 (size=24096) 2024-11-13T20:05:59,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742292_1468 (size=24096) 2024-11-13T20:05:59,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742293_1469 (size=111872) 2024-11-13T20:05:59,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742293_1469 (size=111872) 2024-11-13T20:05:59,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742293_1469 (size=111872) 2024-11-13T20:05:59,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742294_1470 (size=45609) 2024-11-13T20:05:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added 
to blk_1073742294_1470 (size=45609) 2024-11-13T20:05:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742294_1470 (size=45609) 2024-11-13T20:05:59,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742295_1471 (size=136454) 2024-11-13T20:05:59,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742295_1471 (size=136454) 2024-11-13T20:05:59,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742295_1471 (size=136454) 2024-11-13T20:05:59,319 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:05:59,322 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-13T20:05:59,323 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T20:05:59,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742296_1472 (size=714) 2024-11-13T20:05:59,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742296_1472 (size=714) 2024-11-13T20:05:59,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742296_1472 (size=714) 2024-11-13T20:05:59,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742297_1473 (size=15) 2024-11-13T20:05:59,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742297_1473 (size=15) 2024-11-13T20:05:59,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742297_1473 (size=15) 2024-11-13T20:05:59,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742298_1474 (size=303774) 2024-11-13T20:05:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742298_1474 (size=303774) 2024-11-13T20:05:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742298_1474 (size=303774) 2024-11-13T20:05:59,392 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:05:59,392 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T20:06:00,142 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:42082 2024-11-13T20:06:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-13T20:06:00,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-13T20:06:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-13T20:06:06,111 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:06:08,853 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:06:09,874 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:41594 2024-11-13T20:06:10,410 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 02ff9aeb72cf9b21fd2a7dfcab36dd97, had cached 0 bytes from a total of 5688 2024-11-13T20:06:10,410 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b2c0df47f245978cdca563d59f9f3349, had cached 0 bytes from a total of 14861 2024-11-13T20:06:10,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742299_1475 (size=349424) 2024-11-13T20:06:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742299_1475 (size=349424) 2024-11-13T20:06:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742299_1475 (size=349424) 2024-11-13T20:06:12,317 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:34698 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805/archive/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T20:06:18,040 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:32794 2024-11-13T20:06:21,974 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000002/launch_container.sh] 2024-11-13T20:06:21,974 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000002/container_tokens] 2024-11-13T20:06:21,974 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000002/sysfs] 2024-11-13T20:06:23,512 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000003/launch_container.sh] 2024-11-13T20:06:23,512 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000003/container_tokens] 2024-11-13T20:06:23,512 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805/archive/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T20:06:25,087 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:55742 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/local-export-1731528356805/archive/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T20:06:31,100 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:55750 2024-11-13T20:06:34,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000004/launch_container.sh] 2024-11-13T20:06:34,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000004/container_tokens] 2024-11-13T20:06:34,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000004/sysfs] 2024-11-13T20:06:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742300_1476 (size=21330) 2024-11-13T20:06:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742300_1476 (size=21330) 2024-11-13T20:06:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742300_1476 (size=21330) 2024-11-13T20:06:35,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742301_1477 (size=460) 2024-11-13T20:06:35,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742301_1477 (size=460) 2024-11-13T20:06:35,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742301_1477 (size=460) 2024-11-13T20:06:35,290 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000005/launch_container.sh] 2024-11-13T20:06:35,290 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000005/container_tokens] 2024-11-13T20:06:35,290 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000005/sysfs] 2024-11-13T20:06:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742302_1478 (size=21330) 2024-11-13T20:06:35,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742302_1478 (size=21330) 2024-11-13T20:06:35,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742302_1478 (size=21330) 2024-11-13T20:06:35,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742303_1479 (size=349424) 2024-11-13T20:06:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742303_1479 (size=349424) 2024-11-13T20:06:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742303_1479 (size=349424) 2024-11-13T20:06:35,803 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:36778 2024-11-13T20:06:37,040 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1731528172678_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] 
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
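
All three map attempts above fail in ExportSnapshot$ExportMapper.verifyCopyResult with the same checksum-mismatch IOException (HDFS source versus LocalFileSystem target), and the job finally surfaces as an ExportSnapshotException in testExportWithChecksum. The error message itself names the two remedies; the hedged sketch below shows one way of applying them, where the dfs.checksum.combine.mode property and the -no-checksum-verify flag are quoted verbatim from that message and everything else (class name, destination path) is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ChecksumWorkaroundSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option 1: keep verification, but compare file-level composite CRCs,
    // which stay comparable when block sizes or filesystems differ.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",  // illustrative destination
        // Option 2 (alternative to Option 1): uncomment to skip checksum
        // verification entirely; as the log warns, this can mask corruption
        // introduced during the copy.
        // "-no-checksum-verify"
    });
    System.exit(rc);
  }
}
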
2024-11-13T20:06:37,041 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041 2024-11-13T20:06:37,041 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041, srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:06:37,069 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:06:37,069 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T20:06:37,072 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:06:37,076 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T20:06:37,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742304_1480 (size=156) 2024-11-13T20:06:37,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742304_1480 (size=156) 2024-11-13T20:06:37,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742304_1480 (size=156) 2024-11-13T20:06:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742305_1481 (size=951) 2024-11-13T20:06:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742305_1481 (size=951) 2024-11-13T20:06:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742305_1481 (size=951) 2024-11-13T20:06:37,099 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:37,099 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:37,099 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,162 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-5074403262253045084.jar 2024-11-13T20:06:38,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-13809286978893344360.jar 2024-11-13T20:06:38,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:06:38,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:06:38,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:06:38,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:06:38,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:06:38,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:06:38,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:06:38,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:06:38,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:06:38,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:06:38,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:06:38,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:06:38,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:06:38,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:06:38,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742306_1482 (size=131440) 2024-11-13T20:06:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742306_1482 (size=131440) 2024-11-13T20:06:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742306_1482 (size=131440) 2024-11-13T20:06:38,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742307_1483 (size=4188619) 2024-11-13T20:06:38,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742307_1483 (size=4188619) 2024-11-13T20:06:38,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742307_1483 (size=4188619) 2024-11-13T20:06:38,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742308_1484 (size=1323991) 2024-11-13T20:06:38,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742308_1484 (size=1323991) 2024-11-13T20:06:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742308_1484 (size=1323991) 2024-11-13T20:06:38,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38925 is added to blk_1073742309_1485 (size=903730) 2024-11-13T20:06:38,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742309_1485 (size=903730) 2024-11-13T20:06:38,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742309_1485 (size=903730) 2024-11-13T20:06:38,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742310_1486 (size=8360083) 2024-11-13T20:06:38,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742310_1486 (size=8360083) 2024-11-13T20:06:38,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742310_1486 (size=8360083) 2024-11-13T20:06:38,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742311_1487 (size=440392) 2024-11-13T20:06:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742311_1487 (size=440392) 2024-11-13T20:06:38,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742311_1487 (size=440392) 2024-11-13T20:06:38,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742312_1488 (size=6424542) 2024-11-13T20:06:38,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742312_1488 (size=6424542) 2024-11-13T20:06:38,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742312_1488 (size=6424542) 2024-11-13T20:06:38,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T20:06:38,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T20:06:38,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T20:06:38,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742314_1490 (size=77835) 2024-11-13T20:06:38,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742314_1490 (size=77835) 2024-11-13T20:06:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742314_1490 (size=77835) 2024-11-13T20:06:38,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742315_1491 (size=30949) 2024-11-13T20:06:38,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742315_1491 (size=30949) 2024-11-13T20:06:38,486 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742315_1491 (size=30949) 2024-11-13T20:06:38,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T20:06:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T20:06:38,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T20:06:38,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T20:06:38,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T20:06:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T20:06:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742318_1494 (size=232957) 2024-11-13T20:06:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742318_1494 (size=232957) 2024-11-13T20:06:38,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742318_1494 (size=232957) 2024-11-13T20:06:38,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742319_1495 (size=127628) 2024-11-13T20:06:38,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742319_1495 (size=127628) 2024-11-13T20:06:38,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742319_1495 (size=127628) 2024-11-13T20:06:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742320_1496 (size=20406) 2024-11-13T20:06:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742320_1496 (size=20406) 2024-11-13T20:06:38,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742320_1496 (size=20406) 2024-11-13T20:06:38,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T20:06:38,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T20:06:38,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T20:06:38,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742322_1498 (size=217634) 2024-11-13T20:06:38,631 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742322_1498 (size=217634) 2024-11-13T20:06:38,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742322_1498 (size=217634) 2024-11-13T20:06:38,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T20:06:38,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T20:06:38,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T20:06:38,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742324_1500 (size=322274) 2024-11-13T20:06:38,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742324_1500 (size=322274) 2024-11-13T20:06:38,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742324_1500 (size=322274) 2024-11-13T20:06:38,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742325_1501 (size=503880) 2024-11-13T20:06:38,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742325_1501 (size=503880) 2024-11-13T20:06:38,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742325_1501 (size=503880) 2024-11-13T20:06:38,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742326_1502 (size=29229) 2024-11-13T20:06:38,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742326_1502 (size=29229) 2024-11-13T20:06:38,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742326_1502 (size=29229) 2024-11-13T20:06:38,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742327_1503 (size=24096) 2024-11-13T20:06:38,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742327_1503 (size=24096) 2024-11-13T20:06:38,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742327_1503 (size=24096) 2024-11-13T20:06:38,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742328_1504 (size=111872) 2024-11-13T20:06:38,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742328_1504 (size=111872) 2024-11-13T20:06:38,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742328_1504 (size=111872) 2024-11-13T20:06:38,712 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742329_1505 (size=45609) 2024-11-13T20:06:38,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742329_1505 (size=45609) 2024-11-13T20:06:38,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742329_1505 (size=45609) 2024-11-13T20:06:38,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742330_1506 (size=136454) 2024-11-13T20:06:38,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742330_1506 (size=136454) 2024-11-13T20:06:38,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742330_1506 (size=136454) 2024-11-13T20:06:38,733 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T20:06:38,736 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-13T20:06:38,738 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T20:06:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742331_1507 (size=714) 2024-11-13T20:06:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742331_1507 (size=714) 2024-11-13T20:06:38,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742331_1507 (size=714) 2024-11-13T20:06:38,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742332_1508 (size=15) 2024-11-13T20:06:38,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742332_1508 (size=15) 2024-11-13T20:06:38,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742332_1508 (size=15) 2024-11-13T20:06:38,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742333_1509 (size=303726) 2024-11-13T20:06:38,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742333_1509 (size=303726) 2024-11-13T20:06:38,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742333_1509 (size=303726) 2024-11-13T20:06:38,853 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
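The run of "For class X, using jar Y" DEBUG lines above, together with the JobResourceUploader warning that no job jar is set, comes from TableMapReduceUtil resolving the jar that backs each dependency class and attaching it to the copy job instead of relying on a single fat job jar. A minimal sketch of that pattern (not the exact ExportSnapshot code; the job name is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-copy"); // placeholder job name
    // Locates the jar containing each HBase/ZooKeeper/metrics dependency class
    // (the "using jar ..." lines above) and ships it via the distributed cache.
    // Because Job#setJar(String) is never called, JobResourceUploader still logs
    // the "No job jar file set" warning seen in the log.
    TableMapReduceUtil.addDependencyJars(job);
  }
}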
2024-11-13T20:06:39,542 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d91fda21e751725ceff2a6bf6a3f748e, had cached 0 bytes from a total of 6720 2024-11-13T20:06:39,545 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 299c0b531e11d4010454cb822587984b, had cached 0 bytes from a total of 14047 2024-11-13T20:06:41,886 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:06:41,886 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:06:41,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0008_000001 (auth:SIMPLE) from 127.0.0.1:53420 2024-11-13T20:06:41,902 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000001/launch_container.sh] 2024-11-13T20:06:41,902 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000001/container_tokens] 2024-11-13T20:06:41,902 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_3/usercache/jenkins/appcache/application_1731528172678_0008/container_1731528172678_0008_01_000001/sysfs] 2024-11-13T20:06:42,279 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0009_000001 (auth:SIMPLE) from 127.0.0.1:59484 2024-11-13T20:06:44,406 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 299c0b531e11d4010454cb822587984b changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:06:44,406 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d91fda21e751725ceff2a6bf6a3f748e changed from -1.0 to 0.0, refreshing cache 2024-11-13T20:06:49,506 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0009_000001 (auth:SIMPLE) from 127.0.0.1:40014 2024-11-13T20:06:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742334_1510 (size=349376) 2024-11-13T20:06:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742334_1510 
(size=349376) 2024-11-13T20:06:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742334_1510 (size=349376) 2024-11-13T20:06:51,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0009_000001 (auth:SIMPLE) from 127.0.0.1:46224 2024-11-13T20:06:55,411 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 02ff9aeb72cf9b21fd2a7dfcab36dd97, had cached 0 bytes from a total of 5688 2024-11-13T20:06:55,411 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b2c0df47f245978cdca563d59f9f3349, had cached 0 bytes from a total of 14861 2024-11-13T20:06:56,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742335_1511 (size=14047) 2024-11-13T20:06:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742335_1511 (size=14047) 2024-11-13T20:06:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742335_1511 (size=14047) 2024-11-13T20:06:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742336_1512 (size=7892) 2024-11-13T20:06:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742336_1512 (size=7892) 2024-11-13T20:06:56,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742336_1512 (size=7892) 2024-11-13T20:06:57,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742337_1513 (size=6720) 2024-11-13T20:06:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742337_1513 (size=6720) 2024-11-13T20:06:57,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742337_1513 (size=6720) 2024-11-13T20:06:58,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742338_1514 (size=5381) 2024-11-13T20:06:58,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742338_1514 (size=5381) 2024-11-13T20:06:58,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742338_1514 (size=5381) 2024-11-13T20:06:58,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742339_1515 (size=17459) 2024-11-13T20:06:58,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742339_1515 (size=17459) 2024-11-13T20:06:58,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742339_1515 (size=17459) 2024-11-13T20:06:58,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38925 is added to blk_1073742340_1516 (size=462) 2024-11-13T20:06:58,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742340_1516 (size=462) 2024-11-13T20:06:58,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742340_1516 (size=462) 2024-11-13T20:06:58,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000002/launch_container.sh] 2024-11-13T20:06:58,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000002/container_tokens] 2024-11-13T20:06:58,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000002/sysfs] 2024-11-13T20:06:58,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742341_1517 (size=17459) 2024-11-13T20:06:58,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742341_1517 (size=17459) 2024-11-13T20:06:58,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742341_1517 (size=17459) 2024-11-13T20:06:58,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742342_1518 (size=349376) 2024-11-13T20:06:58,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742342_1518 (size=349376) 2024-11-13T20:06:58,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742342_1518 (size=349376) 2024-11-13T20:06:58,611 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0009_000001 (auth:SIMPLE) from 127.0.0.1:46238 2024-11-13T20:07:00,140 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:07:00,150 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-13T20:07:00,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-13T20:07:00,209 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:07:00,217 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:07:00,217 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T20:07:00,225 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-13T20:07:00,225 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-13T20:07:00,225 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T20:07:00,237 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-13T20:07:00,237 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528397041/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-13T20:07:00,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-13T20:07:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,282 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528420282"}]},"ts":"1731528420282"} 2024-11-13T20:07:00,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-13T20:07:00,290 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-13T20:07:00,290 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-13T20:07:00,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-13T20:07:00,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, UNASSIGN}] 2024-11-13T20:07:00,298 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, UNASSIGN 2024-11-13T20:07:00,299 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=299c0b531e11d4010454cb822587984b, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:07:00,299 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, UNASSIGN 2024-11-13T20:07:00,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, UNASSIGN because future has completed 2024-11-13T20:07:00,302 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=d91fda21e751725ceff2a6bf6a3f748e, regionState=CLOSING, regionLocation=54d0784d1943,39533,1731528161273 2024-11-13T20:07:00,308 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=54d0784d1943,39533,1731528161273, table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-13T20:07:00,309 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:07:00,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:07:00,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, UNASSIGN because future has completed 2024-11-13T20:07:00,314 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:07:00,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273}] 2024-11-13T20:07:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-13T20:07:00,464 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(122): Close 299c0b531e11d4010454cb822587984b 2024-11-13T20:07:00,464 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:07:00,464 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing 299c0b531e11d4010454cb822587984b, disabling compactions & flushes 2024-11-13T20:07:00,464 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:07:00,464 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 2024-11-13T20:07:00,464 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. after waiting 0 ms 2024-11-13T20:07:00,464 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 
2024-11-13T20:07:00,468 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:07:00,469 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:07:00,469 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing d91fda21e751725ceff2a6bf6a3f748e, disabling compactions & flushes 2024-11-13T20:07:00,469 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:07:00,469 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:07:00,469 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. after waiting 0 ms 2024-11-13T20:07:00,469 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:07:00,484 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:07:00,484 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:07:00,485 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:07:00,485 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:07:00,485 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e. 2024-11-13T20:07:00,486 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b. 
2024-11-13T20:07:00,486 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for d91fda21e751725ceff2a6bf6a3f748e: Waiting for close lock at 1731528420469Running coprocessor pre-close hooks at 1731528420469Disabling compacts and flushes for region at 1731528420469Disabling writes for close at 1731528420469Writing region close event to WAL at 1731528420471 (+2 ms)Running coprocessor post-close hooks at 1731528420485 (+14 ms)Closed at 1731528420485 2024-11-13T20:07:00,486 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for 299c0b531e11d4010454cb822587984b: Waiting for close lock at 1731528420464Running coprocessor pre-close hooks at 1731528420464Disabling compacts and flushes for region at 1731528420464Disabling writes for close at 1731528420464Writing region close event to WAL at 1731528420467 (+3 ms)Running coprocessor post-close hooks at 1731528420485 (+18 ms)Closed at 1731528420486 (+1 ms) 2024-11-13T20:07:00,491 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:07:00,492 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=d91fda21e751725ceff2a6bf6a3f748e, regionState=CLOSED 2024-11-13T20:07:00,493 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed 299c0b531e11d4010454cb822587984b 2024-11-13T20:07:00,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273 because future has completed 2024-11-13T20:07:00,495 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=299c0b531e11d4010454cb822587984b, regionState=CLOSED 2024-11-13T20:07:00,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:07:00,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=188 2024-11-13T20:07:00,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure d91fda21e751725ceff2a6bf6a3f748e, server=54d0784d1943,39533,1731528161273 in 185 msec 2024-11-13T20:07:00,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-11-13T20:07:00,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 299c0b531e11d4010454cb822587984b, server=54d0784d1943,33333,1731528161197 in 192 msec 2024-11-13T20:07:00,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d91fda21e751725ceff2a6bf6a3f748e, UNASSIGN in 208 msec 2024-11-13T20:07:00,509 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=187 2024-11-13T20:07:00,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=299c0b531e11d4010454cb822587984b, UNASSIGN in 210 msec 2024-11-13T20:07:00,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-13T20:07:00,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 219 msec 2024-11-13T20:07:00,514 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528420514"}]},"ts":"1731528420514"} 2024-11-13T20:07:00,516 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-13T20:07:00,516 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-13T20:07:00,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 242 msec 2024-11-13T20:07:00,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-13T20:07:00,606 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T20:07:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-13T20:07:00,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-13T20:07:00,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,614 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-13T20:07:00,615 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,618 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-13T20:07:00,619 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:07:00,619 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b 2024-11-13T20:07:00,622 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/recovered.edits] 2024-11-13T20:07:00,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,627 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T20:07:00,627 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T20:07:00,628 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T20:07:00,628 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T20:07:00,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:00,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,629 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:00,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:00,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-13T20:07:00,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T20:07:00,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:00,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:00,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:00,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:00,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:00,638 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/recovered.edits] 2024-11-13T20:07:00,645 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e 
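The pair of "Archived from ... to ..." entries around this point show the delete-table cleanup moving each store file out of the live data directory and into the cluster archive, keeping the table/region/family layout intact under the archive root. A minimal plain-Java sketch of that path rewrite, using the root directory and file names taken from the log; the helper itself is illustrative and not an HBase API:

    // Illustrative only: mirrors the path mapping visible in the HFileArchiver entries,
    // <root>/data/<ns>/<table>/<region>/<cf>/<file> -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>.
    public final class ArchivePathSketch {
      static String toArchivePath(String rootDir, String storeFile) {
        String relative = storeFile.substring(rootDir.length() + 1); // "data/default/<table>/<region>/cf/<file>"
        return rootDir + "/archive/" + relative;
      }

      public static void main(String[] args) {
        String root = "hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248";
        String storeFile = root + "/data/default/testtb-testExportWithChecksum/"
            + "d91fda21e751725ceff2a6bf6a3f748e/cf/788f6ded517f4cbfb03549065f2d1d0e";
        System.out.println(toArchivePath(root, storeFile)); // .../archive/data/default/testtb-testExportWithChecksum/...
      }
    }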
2024-11-13T20:07:00,646 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/cf/ae80db9ea1f84b62b00c600e5943bd0d 2024-11-13T20:07:00,651 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e/recovered.edits/9.seqid 2024-11-13T20:07:00,652 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:07:00,652 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b/recovered.edits/9.seqid 2024-11-13T20:07:00,653 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportWithChecksum/299c0b531e11d4010454cb822587984b 2024-11-13T20:07:00,653 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-13T20:07:00,654 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-13T20:07:00,654 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-13T20:07:00,660 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241113f5194990ba9b484ebad4a4abf63719c5_299c0b531e11d4010454cb822587984b 2024-11-13T20:07:00,662 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241113d3511af0e2f6437b97ef934f07304ab6_d91fda21e751725ceff2a6bf6a3f748e 2024-11-13T20:07:00,663 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-13T20:07:00,669 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,672 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-13T20:07:00,676 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-13T20:07:00,677 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,677 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-13T20:07:00,677 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528420677"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:00,677 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528420677"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:00,679 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:07:00,679 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d91fda21e751725ceff2a6bf6a3f748e, NAME => 'testtb-testExportWithChecksum,,1731528353744.d91fda21e751725ceff2a6bf6a3f748e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 299c0b531e11d4010454cb822587984b, NAME => 'testtb-testExportWithChecksum,1,1731528353744.299c0b531e11d4010454cb822587984b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:07:00,679 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
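The disable/delete sequence recorded above, together with the snapshot deletions that follow, is the kind of teardown a client drives through the HBase Admin API. A minimal sketch assuming a standard client connection, with the table and snapshot names taken from the log; this is illustrative, not the test's actual teardown code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative sketch of the client-side calls behind the DISABLE/DELETE operations logged above.
    public final class TeardownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            admin.disableTable(table);  // -> DisableTableProcedure (pid=186 above)
            admin.deleteTable(table);   // -> DeleteTableProcedure (pid=192 above)
          }
          // Snapshots are removed separately; see the "delete name: ..." entries below.
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }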
2024-11-13T20:07:00,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528420679"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:00,682 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-13T20:07:00,683 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T20:07:00,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 73 msec 2024-11-13T20:07:00,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-13T20:07:00,736 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-13T20:07:00,736 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T20:07:00,744 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-13T20:07:00,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-13T20:07:00,748 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-13T20:07:00,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-13T20:07:00,786 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=790 (was 791), OpenFileDescriptor=777 (was 784), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1104 (was 691) - SystemLoadAverage LEAK? -, ProcessCount=18 (was 18), AvailableMemoryMB=2094 (was 2065) - AvailableMemoryMB LEAK? 
- 2024-11-13T20:07:00,786 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-13T20:07:00,810 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=790, OpenFileDescriptor=777, MaxFileDescriptor=1048576, SystemLoadAverage=1104, ProcessCount=18, AvailableMemoryMB=2091 2024-11-13T20:07:00,810 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=790 is superior to 500 2024-11-13T20:07:00,812 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T20:07:00,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:00,817 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T20:07:00,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-13T20:07:00,819 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T20:07:00,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-13T20:07:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742343_1519 (size=454) 2024-11-13T20:07:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742343_1519 (size=454) 2024-11-13T20:07:00,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742343_1519 (size=454) 2024-11-13T20:07:00,830 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ae53b59159ae889879a4e0904ce57f75, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:07:00,830 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a7a1474ecf08e9b99b55f26e2a02ebc8, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:07:00,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742344_1520 (size=79) 2024-11-13T20:07:00,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742344_1520 (size=79) 2024-11-13T20:07:00,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742344_1520 (size=79) 2024-11-13T20:07:00,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742345_1521 (size=79) 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:07:00,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742345_1521 (size=79) 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing ae53b59159ae889879a4e0904ce57f75, disabling compactions & flushes 2024-11-13T20:07:00,853 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
after waiting 0 ms 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:00,853 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:00,853 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for ae53b59159ae889879a4e0904ce57f75: Waiting for close lock at 1731528420853Disabling compacts and flushes for region at 1731528420853Disabling writes for close at 1731528420853Writing region close event to WAL at 1731528420853Closed at 1731528420853 2024-11-13T20:07:00,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742345_1521 (size=79) 2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing a7a1474ecf08e9b99b55f26e2a02ebc8, disabling compactions & flushes 2024-11-13T20:07:00,854 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. after waiting 0 ms 2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:00,854 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 
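The descriptor echoed in the create request above amounts to a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') on a table pre-split at row key '1', which is why two regions ('' -> '1' and '1' -> '') are being initialized here. A hedged sketch of the equivalent client-side construction with the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; the helper shape and any options not shown in the log are assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch; not the test's actual table-creation code.
    public final class CreateMobTableSketch {
      static void createTable(Admin admin) throws Exception {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': effectively every value is written as a MOB
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setColumnFamily(cf)
            .build();
        // Pre-split at row key "1", matching the two regions created above.
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
      }
    }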
2024-11-13T20:07:00,854 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for a7a1474ecf08e9b99b55f26e2a02ebc8: Waiting for close lock at 1731528420854Disabling compacts and flushes for region at 1731528420854Disabling writes for close at 1731528420854Writing region close event to WAL at 1731528420854Closed at 1731528420854 2024-11-13T20:07:00,855 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T20:07:00,856 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731528420856"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528420856"}]},"ts":"1731528420856"} 2024-11-13T20:07:00,856 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731528420856"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731528420856"}]},"ts":"1731528420856"} 2024-11-13T20:07:00,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T20:07:00,859 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T20:07:00,859 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528420859"}]},"ts":"1731528420859"} 2024-11-13T20:07:00,861 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-13T20:07:00,861 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:07:00,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:07:00,862 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:07:00,862 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:07:00,862 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:07:00,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T20:07:00,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, ASSIGN}] 2024-11-13T20:07:00,864 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, ASSIGN 2024-11-13T20:07:00,864 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, ASSIGN 2024-11-13T20:07:00,865 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, ASSIGN; state=OFFLINE, location=54d0784d1943,33333,1731528161197; forceNewPlan=false, retain=false 2024-11-13T20:07:00,865 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, ASSIGN; state=OFFLINE, location=54d0784d1943,36439,1731528160987; forceNewPlan=false, retain=false 2024-11-13T20:07:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-13T20:07:01,016 INFO [54d0784d1943:34357 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T20:07:01,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=a7a1474ecf08e9b99b55f26e2a02ebc8, regionState=OPENING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:07:01,017 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=ae53b59159ae889879a4e0904ce57f75, regionState=OPENING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:07:01,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, ASSIGN because future has completed 2024-11-13T20:07:01,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae53b59159ae889879a4e0904ce57f75, server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:07:01,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, ASSIGN because future has completed 2024-11-13T20:07:01,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:07:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-13T20:07:01,176 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:01,177 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => ae53b59159ae889879a4e0904ce57f75, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => a7a1474ecf08e9b99b55f26e2a02ebc8, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
service=AccessControlService 2024-11-13T20:07:01,177 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. service=AccessControlService 2024-11-13T20:07:01,177 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,178 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
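The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" and AccessControlService registrations above come from the cluster running with HBase authorization enabled. A minimal sketch of the configuration keys involved, set programmatically here for brevity (in a deployment they would normally live in hbase-site.xml, and the exact coprocessor list this mini-cluster uses is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch of an access-controlled configuration; not the test's actual setup.
    public final class AccessControlConfigSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        String accessController = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes", accessController);
        conf.set("hbase.coprocessor.region.classes", accessController);
        conf.set("hbase.coprocessor.regionserver.classes", accessController);
        return conf;
      }
    }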
2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,178 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,179 INFO [StoreOpener-ae53b59159ae889879a4e0904ce57f75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,181 INFO [StoreOpener-ae53b59159ae889879a4e0904ce57f75-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ae53b59159ae889879a4e0904ce57f75 columnFamilyName cf 2024-11-13T20:07:01,184 DEBUG [StoreOpener-ae53b59159ae889879a4e0904ce57f75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:07:01,184 INFO [StoreOpener-ae53b59159ae889879a4e0904ce57f75-1 {}] regionserver.HStore(327): Store=ae53b59159ae889879a4e0904ce57f75/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:07:01,185 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,185 INFO [StoreOpener-a7a1474ecf08e9b99b55f26e2a02ebc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,186 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,186 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,186 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,186 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,187 INFO [StoreOpener-a7a1474ecf08e9b99b55f26e2a02ebc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a7a1474ecf08e9b99b55f26e2a02ebc8 columnFamilyName cf 2024-11-13T20:07:01,187 DEBUG [StoreOpener-a7a1474ecf08e9b99b55f26e2a02ebc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:07:01,188 INFO [StoreOpener-a7a1474ecf08e9b99b55f26e2a02ebc8-1 {}] regionserver.HStore(327): Store=a7a1474ecf08e9b99b55f26e2a02ebc8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T20:07:01,188 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,188 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,189 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,190 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,190 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for 
a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,190 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,190 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:07:01,191 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened ae53b59159ae889879a4e0904ce57f75; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59382864, jitterRate=-0.11512637138366699}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:07:01,191 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,191 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for ae53b59159ae889879a4e0904ce57f75: Running coprocessor pre-open hook at 1731528421178Writing region info on filesystem at 1731528421178Initializing all the Stores at 1731528421178Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528421178Cleaning up temporary data from old regions at 1731528421187 (+9 ms)Running coprocessor post-open hooks at 1731528421191 (+4 ms)Region opened successfully at 1731528421191 2024-11-13T20:07:01,192 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75., pid=196, masterSystemTime=1731528421171 2024-11-13T20:07:01,193 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,195 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:01,195 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
2024-11-13T20:07:01,195 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=ae53b59159ae889879a4e0904ce57f75, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:07:01,196 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T20:07:01,197 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened a7a1474ecf08e9b99b55f26e2a02ebc8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64644076, jitterRate=-0.03672820329666138}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T20:07:01,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae53b59159ae889879a4e0904ce57f75, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:07:01,197 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,197 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for a7a1474ecf08e9b99b55f26e2a02ebc8: Running coprocessor pre-open hook at 1731528421178Writing region info on filesystem at 1731528421178Initializing all the Stores at 1731528421179 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731528421179Cleaning up temporary data from old regions at 1731528421190 (+11 ms)Running coprocessor post-open hooks at 1731528421197 (+7 ms)Region opened successfully at 1731528421197 2024-11-13T20:07:01,198 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8., pid=197, masterSystemTime=1731528421172 2024-11-13T20:07:01,200 DEBUG [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:01,200 INFO [RS_OPEN_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 
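Both regions of the new table have now been opened by their region servers; the META scans further down ("Found 2 regions for table ...") are the client confirming that layout. A hedged sketch of the equivalent check through the Admin API (illustrative only; the expected split points come from the log):

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Illustrative sketch; not the test's actual verification code.
    public final class RegionCheckSketch {
      static void verifyTwoRegions(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        List<RegionInfo> regions = admin.getRegions(table);
        // Expect the two regions assigned above: ('' -> '1') and ('1' -> '').
        if (regions.size() != 2) {
          throw new IllegalStateException("expected 2 regions, found " + regions.size());
        }
      }
    }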
2024-11-13T20:07:01,200 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=a7a1474ecf08e9b99b55f26e2a02ebc8, regionState=OPEN, openSeqNum=2, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:07:01,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=194 2024-11-13T20:07:01,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure ae53b59159ae889879a4e0904ce57f75, server=54d0784d1943,36439,1731528160987 in 179 msec 2024-11-13T20:07:01,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, ASSIGN in 337 msec 2024-11-13T20:07:01,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:07:01,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-13T20:07:01,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197 in 183 msec 2024-11-13T20:07:01,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=193 2024-11-13T20:07:01,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, ASSIGN in 342 msec 2024-11-13T20:07:01,211 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T20:07:01,211 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528421211"}]},"ts":"1731528421211"} 2024-11-13T20:07:01,213 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-13T20:07:01,215 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T20:07:01,216 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-13T20:07:01,220 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T20:07:01,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:01,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:01,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:01,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:01,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 414 msec 2024-11-13T20:07:01,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-13T20:07:01,445 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T20:07:01,446 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:01,449 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:07:01,451 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,458 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,464 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,467 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T20:07:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528421467 (current time:1731528421467). 
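The `snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }` entry above is the master receiving a client snapshot call. A minimal sketch of how such a request can be issued through the public Admin API (this is not the test's own code; the connection handling and the follow-up listing are illustrative):

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: regions are flushed before their store files are
      // recorded, matching the SnapshotProcedure states seen in this log.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH);

      // The blocking call returns once the procedure finishes; confirm it is registered.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      snapshots.forEach(s -> System.out.println(s.getName()));
    }
  }
}
```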
2024-11-13T20:07:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:07:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-13T20:07:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:07:01,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d6fe16a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:07:01,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:07:01,469 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c93d427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:07:01,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,471 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:07:01,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d00f057, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:07:01,474 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:07:01,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:07:01,479 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:07:01,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:07:01,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,479 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
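The connection setup and teardown around this point comes from SnapshotDescriptionUtils.validate first checking whether security is available and then reading the table's ACLs from hbase:acl, where the earlier `Writing permission with rowKey ... jenkins: RWXCA` grant was stored. A minimal sketch of granting and reading such table permissions through AccessControlClient, assuming the AccessController coprocessor is enabled as it is in this test cluster (client setup is illustrative, not the test's own code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAcl {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant RWXCA on the whole table (family and qualifier null), which is what
      // the "jenkins: RWXCA" entries in hbase:acl above correspond to.
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);

      // Read the stored permissions back, analogous to the PermissionStorage
      // "Read acl" lookups performed during snapshot validation.
      AccessControlClient.getUserPermissions(conn, table.getNameAsString())
          .forEach(p -> System.out.println(p));
    }
  }
}
```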
2024-11-13T20:07:01,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6914be3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:07:01,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:07:01,481 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:07:01,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:07:01,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:07:01,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37291d7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:07:01,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:07:01,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,482 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46446, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:07:01,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35cb7299, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:07:01,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:07:01,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T20:07:01,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:07:01,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:07:01,488 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 2024-11-13T20:07:01,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:07:01,488 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,489 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:07:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T20:07:01,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T20:07:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T20:07:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-13T20:07:01,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-13T20:07:01,491 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:07:01,492 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:07:01,494 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:07:01,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742346_1522 (size=203) 2024-11-13T20:07:01,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742346_1522 (size=203) 2024-11-13T20:07:01,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742346_1522 (size=203) 2024-11-13T20:07:01,500 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:07:01,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75}, {pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8}] 2024-11-13T20:07:01,501 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,501 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-13T20:07:01,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-13T20:07:01,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-13T20:07:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for a7a1474ecf08e9b99b55f26e2a02ebc8: 2024-11-13T20:07:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for ae53b59159ae889879a4e0904ce57f75: 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:07:01,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T20:07:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742348_1524 (size=82) 2024-11-13T20:07:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742348_1524 (size=82) 2024-11-13T20:07:01,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742348_1524 (size=82) 2024-11-13T20:07:01,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 
2024-11-13T20:07:01,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-13T20:07:01,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-13T20:07:01,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,691 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 in 192 msec 2024-11-13T20:07:01,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742347_1523 (size=82) 2024-11-13T20:07:01,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742347_1523 (size=82) 2024-11-13T20:07:01,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742347_1523 (size=82) 2024-11-13T20:07:01,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
2024-11-13T20:07:01,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-13T20:07:01,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-13T20:07:01,702 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,702 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-11-13T20:07:01,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 in 203 msec 2024-11-13T20:07:01,704 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:07:01,705 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:07:01,706 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
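The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries appear because the table's `cf` family is MOB-enabled, so the snapshot also has to cover the mobdir area that shows up later in this log. A minimal sketch of creating a table shaped like this one, pre-split at `1` to match the two regions (start keys `` and `1`) seen in the ASSIGN and SnapshotRegionProcedure entries; the MOB threshold value is illustrative, not taken from the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

    // MOB-enabled 'cf': cells larger than the threshold are written under the
    // mobdir area seen in the log instead of the regular region store files.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(3L) // illustrative threshold
        .build();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Pre-split at "1" so the table has the two regions this log operates on.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf).build(),
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```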
2024-11-13T20:07:01,706 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:07:01,706 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:07:01,707 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T20:07:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742349_1525 (size=74) 2024-11-13T20:07:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742349_1525 (size=74) 2024-11-13T20:07:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742349_1525 (size=74) 2024-11-13T20:07:01,717 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:07:01,717 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,718 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742350_1526 (size=697) 2024-11-13T20:07:01,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742350_1526 (size=697) 2024-11-13T20:07:01,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742350_1526 (size=697) 2024-11-13T20:07:01,729 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:07:01,732 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:07:01,733 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,734 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:07:01,734 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-13T20:07:01,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 245 msec 2024-11-13T20:07:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-13T20:07:01,805 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T20:07:01,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36439 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:07:01,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33333 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T20:07:01,816 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:01,818 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
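The two `writing data to region ... with WAL disabled. Data may be lost in the event of a crash.` warnings above correspond to client writes that skip the write-ahead log. A minimal sketch of such a write using the public client API; the row key and value are illustrative, while the family and qualifier `cf:q` match the HFile keys that appear later in the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                      // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
                    Bytes.toBytes("value"));                          // illustrative value
      // Skip the write-ahead log: faster, but the region server warns (as above)
      // that unflushed data is lost if it crashes before the next flush.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```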
2024-11-13T20:07:01,818 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T20:07:01,820 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,824 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,829 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T20:07:01,831 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T20:07:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731528421831 (current time:1731528421831). 2024-11-13T20:07:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T20:07:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-13T20:07:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T20:07:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@723caede, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:07:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:07:01,833 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61136a27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:07:01,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,834 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:07:01,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15fc31e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:07:01,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:07:01,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,836 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:07:01,837 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:07:01,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:07:01,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:07:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29000431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ClusterIdFetcher(90): Going to request 54d0784d1943,34357,-1 for getting cluster id 2024-11-13T20:07:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T20:07:01,839 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f805b11d-02b0-4f5c-91d0-d72e768276fd' 2024-11-13T20:07:01,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T20:07:01,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f805b11d-02b0-4f5c-91d0-d72e768276fd" 2024-11-13T20:07:01,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768a5e4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [54d0784d1943,34357,-1] 2024-11-13T20:07:01,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T20:07:01,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,840 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T20:07:01,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3018563, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T20:07:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T20:07:01,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=54d0784d1943,33333,1731528161197, seqNum=-1] 2024-11-13T20:07:01,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,843 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:07:01,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., hostname=54d0784d1943,36439,1731528160987, seqNum=2] 2024-11-13T20:07:01,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T20:07:01,845 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T20:07:01,846 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357. 
2024-11-13T20:07:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor248.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T20:07:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:07:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T20:07:01,847 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T20:07:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
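This second snapshot, snaptb0-testExportFileSystemStateWithSkipTmp, is the one the test (per its name) later exports with the temporary-directory step skipped; the export itself lies beyond this excerpt. A hedged sketch of driving that export programmatically with the standard `-snapshot`/`-copy-to` arguments; the `snapshot.export.skiptmp` key is an assumption about how skip-tmp mode is switched on, and the destination URI is illustrative, so both should be checked against the HBase version in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithSkipTmp {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed config key for skipping the temporary directory at the destination;
    // verify the exact key against your HBase version before relying on it.
    conf.setBoolean("snapshot.export.skiptmp", true);

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://backup-cluster:8020/hbase"   // illustrative destination
    });
    System.exit(rc);
  }
}
```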
2024-11-13T20:07:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T20:07:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-13T20:07:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:01,851 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T20:07:01,853 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T20:07:01,855 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T20:07:01,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742351_1527 (size=198) 2024-11-13T20:07:01,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742351_1527 (size=198) 2024-11-13T20:07:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742351_1527 (size=198) 2024-11-13T20:07:01,874 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T20:07:01,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8}] 2024-11-13T20:07:01,879 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:01,880 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:02,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36439 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-13T20:07:02,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33333 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-13T20:07:02,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:02,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:02,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing ae53b59159ae889879a4e0904ce57f75 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-13T20:07:02,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing a7a1474ecf08e9b99b55f26e2a02ebc8 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-13T20:07:02,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 is 71, key is 125890e85afba99f3576141e6cf6f89d/cf:q/1731528421815/Put/seqid=0 2024-11-13T20:07:02,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 is 71, key is 049346043a03093df6a85f938400a510/cf:q/1731528421813/Put/seqid=0 2024-11-13T20:07:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742352_1528 (size=8171) 2024-11-13T20:07:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742352_1528 (size=8171) 2024-11-13T20:07:02,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742352_1528 (size=8171) 2024-11-13T20:07:02,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-13T20:07:02,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:02,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/.tmp/cf/9d9e90659d124dc4bc052734083faed7, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=a7a1474ecf08e9b99b55f26e2a02ebc8] 2024-11-13T20:07:02,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/.tmp/cf/9d9e90659d124dc4bc052734083faed7 is 220, key is 12687e4588ee06a5b12c3ad29b8153da8/cf:q/1731528421815/Put/seqid=0 2024-11-13T20:07:02,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742353_1529 (size=5101) 2024-11-13T20:07:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742353_1529 (size=5101) 2024-11-13T20:07:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742353_1529 (size=5101) 2024-11-13T20:07:02,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:07:02,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:02,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/.tmp/cf/72525a3873164a7db73cfc81f6929ffe, 
store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=ae53b59159ae889879a4e0904ce57f75] 2024-11-13T20:07:02,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/.tmp/cf/72525a3873164a7db73cfc81f6929ffe is 220, key is 0a63728b9b86b3f7e5c5fc201809edf62/cf:q/1731528421813/Put/seqid=0 2024-11-13T20:07:02,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742354_1530 (size=15525) 2024-11-13T20:07:02,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742354_1530 (size=15525) 2024-11-13T20:07:02,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742354_1530 (size=15525) 2024-11-13T20:07:02,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/.tmp/cf/9d9e90659d124dc4bc052734083faed7 2024-11-13T20:07:02,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/.tmp/cf/9d9e90659d124dc4bc052734083faed7 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7 2024-11-13T20:07:02,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:02,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7, entries=47, sequenceid=6, filesize=15.2 K 2024-11-13T20:07:02,170 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for a7a1474ecf08e9b99b55f26e2a02ebc8 in 137ms, sequenceid=6, compaction requested=false 2024-11-13T20:07:02,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-13T20:07:02,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for 
a7a1474ecf08e9b99b55f26e2a02ebc8: 2024-11-13T20:07:02,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T20:07:02,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:02,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:07:02,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7] hfiles 2024-11-13T20:07:02,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742355_1531 (size=5960) 2024-11-13T20:07:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742355_1531 (size=5960) 2024-11-13T20:07:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742355_1531 (size=5960) 2024-11-13T20:07:02,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742356_1532 (size=121) 2024-11-13T20:07:02,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742356_1532 (size=121) 2024-11-13T20:07:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 
2024-11-13T20:07:02,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742356_1532 (size=121) 2024-11-13T20:07:02,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-13T20:07:02,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-13T20:07:02,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:02,231 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:02,246 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8 in 364 msec 2024-11-13T20:07:02,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:02,612 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/.tmp/cf/72525a3873164a7db73cfc81f6929ffe 2024-11-13T20:07:02,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/.tmp/cf/72525a3873164a7db73cfc81f6929ffe as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe 2024-11-13T20:07:02,621 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe, entries=3, sequenceid=6, filesize=5.8 K 2024-11-13T20:07:02,622 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ae53b59159ae889879a4e0904ce57f75 in 590ms, sequenceid=6, compaction requested=false 2024-11-13T20:07:02,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for ae53b59159ae889879a4e0904ce57f75: 2024-11-13T20:07:02,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): 
Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T20:07:02,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:02,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T20:07:02,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe] hfiles 2024-11-13T20:07:02,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:02,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742357_1533 (size=121) 2024-11-13T20:07:02,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742357_1533 (size=121) 2024-11-13T20:07:02,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742357_1533 (size=121) 2024-11-13T20:07:02,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
2024-11-13T20:07:02,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/54d0784d1943:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-13T20:07:02,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-13T20:07:02,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:02,639 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:02,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=201 2024-11-13T20:07:02,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ae53b59159ae889879a4e0904ce57f75 in 773 msec 2024-11-13T20:07:02,651 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T20:07:02,652 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T20:07:02,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
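The entries above trace the master-side SnapshotProcedure (pid=201) through its state machine — SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS with one SnapshotRegionProcedure per region, then SNAPSHOT_SNAPSHOT_MOB_REGION — while each region server flushes its store and records HFile references in the snapshot manifest. For orientation, a minimal client-side sketch of the call that triggers this flow via the public Admin API; the connection setup is illustrative and not taken from this test harness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative setup: picks up hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // For an enabled table this takes a flush-based snapshot, matching the
      // type=FLUSH snapshot driven by pid=201 in the log above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}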
2024-11-13T20:07:02,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T20:07:02,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T20:07:02,658 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75] hfiles 2024-11-13T20:07:02,658 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:02,658 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742358_1534 (size=305) 2024-11-13T20:07:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742358_1534 (size=305) 2024-11-13T20:07:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742358_1534 (size=305) 2024-11-13T20:07:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:03,088 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T20:07:03,088 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:03,088 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to 
blk_1073742359_1535 (size=1007) 2024-11-13T20:07:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742359_1535 (size=1007) 2024-11-13T20:07:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742359_1535 (size=1007) 2024-11-13T20:07:03,122 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T20:07:03,135 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T20:07:03,135 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:03,137 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T20:07:03,137 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-13T20:07:03,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 1.2890 sec 2024-11-13T20:07:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T20:07:03,995 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T20:07:03,996 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995 2024-11-13T20:07:03,996 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:43181, tgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995, rawTgtDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995, 
srcFsUri=hdfs://localhost:43181, srcDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:07:04,027 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:43181, inputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248 2024-11-13T20:07:04,027 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:04,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T20:07:04,033 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:04,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742360_1536 (size=198) 2024-11-13T20:07:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742361_1537 (size=1007) 2024-11-13T20:07:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742361_1537 (size=1007) 2024-11-13T20:07:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742360_1536 (size=198) 2024-11-13T20:07:04,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742360_1536 (size=198) 2024-11-13T20:07:04,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742361_1537 (size=1007) 2024-11-13T20:07:04,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:04,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:04,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:04,748 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0009_000001 (auth:SIMPLE) from 127.0.0.1:49540 2024-11-13T20:07:04,763 WARN 
[ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000001/launch_container.sh] 2024-11-13T20:07:04,763 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000001/container_tokens] 2024-11-13T20:07:04,764 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_1/usercache/jenkins/appcache/application_1731528172678_0009/container_1731528172678_0009_01_000001/sysfs] 2024-11-13T20:07:05,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-5269346876631439316.jar 2024-11-13T20:07:05,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop-11805987365529384113.jar 2024-11-13T20:07:05,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T20:07:05,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T20:07:05,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T20:07:05,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T20:07:05,453 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T20:07:05,454 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T20:07:05,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T20:07:05,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:07:05,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:07:05,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:07:05,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:07:05,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T20:07:05,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:07:05,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T20:07:05,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742362_1538 (size=131440) 2024-11-13T20:07:05,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742362_1538 (size=131440) 2024-11-13T20:07:05,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742362_1538 (size=131440) 2024-11-13T20:07:05,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742363_1539 (size=4188619) 2024-11-13T20:07:05,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38925 is added to blk_1073742363_1539 (size=4188619) 2024-11-13T20:07:05,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742363_1539 (size=4188619) 2024-11-13T20:07:05,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742364_1540 (size=1323991) 2024-11-13T20:07:05,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742364_1540 (size=1323991) 2024-11-13T20:07:05,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742364_1540 (size=1323991) 2024-11-13T20:07:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742365_1541 (size=903730) 2024-11-13T20:07:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742365_1541 (size=903730) 2024-11-13T20:07:05,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742365_1541 (size=903730) 2024-11-13T20:07:05,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742366_1542 (size=8360083) 2024-11-13T20:07:05,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742366_1542 (size=8360083) 2024-11-13T20:07:05,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742366_1542 (size=8360083) 2024-11-13T20:07:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742367_1543 (size=1877034) 2024-11-13T20:07:05,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742367_1543 (size=1877034) 2024-11-13T20:07:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742367_1543 (size=1877034) 2024-11-13T20:07:05,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742368_1544 (size=77835) 2024-11-13T20:07:05,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742368_1544 (size=77835) 2024-11-13T20:07:05,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742368_1544 (size=77835) 2024-11-13T20:07:05,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742369_1545 (size=6424542) 2024-11-13T20:07:05,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742369_1545 (size=6424542) 2024-11-13T20:07:05,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742369_1545 (size=6424542) 2024-11-13T20:07:05,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33293 is added to blk_1073742370_1546 (size=30949) 2024-11-13T20:07:05,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742370_1546 (size=30949) 2024-11-13T20:07:05,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742370_1546 (size=30949) 2024-11-13T20:07:05,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T20:07:05,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T20:07:05,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T20:07:05,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T20:07:05,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T20:07:05,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T20:07:05,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742373_1549 (size=232957) 2024-11-13T20:07:05,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742373_1549 (size=232957) 2024-11-13T20:07:05,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742373_1549 (size=232957) 2024-11-13T20:07:05,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742374_1550 (size=127628) 2024-11-13T20:07:05,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742374_1550 (size=127628) 2024-11-13T20:07:05,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742374_1550 (size=127628) 2024-11-13T20:07:05,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742375_1551 (size=20406) 2024-11-13T20:07:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742375_1551 (size=20406) 2024-11-13T20:07:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742375_1551 (size=20406) 2024-11-13T20:07:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742376_1552 (size=5175431) 2024-11-13T20:07:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742376_1552 (size=5175431) 2024-11-13T20:07:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742376_1552 (size=5175431) 2024-11-13T20:07:06,020 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:07:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742377_1553 (size=440392) 2024-11-13T20:07:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742377_1553 (size=440392) 2024-11-13T20:07:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742377_1553 (size=440392) 2024-11-13T20:07:06,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742378_1554 (size=217634) 2024-11-13T20:07:06,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742378_1554 (size=217634) 2024-11-13T20:07:06,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742378_1554 (size=217634) 2024-11-13T20:07:06,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742379_1555 (size=1832290) 2024-11-13T20:07:06,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742379_1555 (size=1832290) 2024-11-13T20:07:06,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742379_1555 (size=1832290) 2024-11-13T20:07:06,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742380_1556 (size=322274) 2024-11-13T20:07:06,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742380_1556 (size=322274) 2024-11-13T20:07:06,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742380_1556 (size=322274) 2024-11-13T20:07:06,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742381_1557 (size=503880) 2024-11-13T20:07:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742381_1557 (size=503880) 2024-11-13T20:07:06,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742381_1557 (size=503880) 2024-11-13T20:07:06,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742382_1558 (size=29229) 2024-11-13T20:07:06,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742382_1558 (size=29229) 2024-11-13T20:07:06,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742382_1558 (size=29229) 2024-11-13T20:07:06,460 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742383_1559 (size=24096) 2024-11-13T20:07:06,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742383_1559 (size=24096) 2024-11-13T20:07:06,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742383_1559 (size=24096) 2024-11-13T20:07:06,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742384_1560 (size=111872) 2024-11-13T20:07:06,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742384_1560 (size=111872) 2024-11-13T20:07:06,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742384_1560 (size=111872) 2024-11-13T20:07:06,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742385_1561 (size=45609) 2024-11-13T20:07:06,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742385_1561 (size=45609) 2024-11-13T20:07:06,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742385_1561 (size=45609) 2024-11-13T20:07:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742386_1562 (size=136454) 2024-11-13T20:07:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742386_1562 (size=136454) 2024-11-13T20:07:06,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742386_1562 (size=136454) 2024-11-13T20:07:06,592 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
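TableMapReduceUtil has just resolved each dependency class to a concrete jar and staged it (the long run of "For class ..., using jar ..." entries and the addStoredBlock confirmations above), but the job itself carries no job jar, which is what the JobResourceUploader warning at 20:07:06,592 points at. In application code this is usually addressed by registering the jar that holds the user classes before shipping dependencies; a minimal sketch under that assumption, with the class and job names illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-style-job"); // illustrative job name
    // Registers the jar containing this class as the job jar — the missing
    // piece that the "No job jar file set" warning above refers to.
    job.setJarByClass(JobJarSetupSketch.class);
    // Ships HBase and third-party dependency jars with the job, the same
    // resolution that produced the "using jar ..." entries above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}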
2024-11-13T20:07:06,595 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-13T20:07:06,596 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.9 K 2024-11-13T20:07:06,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742387_1563 (size=770) 2024-11-13T20:07:06,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742387_1563 (size=770) 2024-11-13T20:07:06,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742387_1563 (size=770) 2024-11-13T20:07:06,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742388_1564 (size=15) 2024-11-13T20:07:06,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742388_1564 (size=15) 2024-11-13T20:07:06,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742388_1564 (size=15) 2024-11-13T20:07:06,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742389_1565 (size=303900) 2024-11-13T20:07:06,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742389_1565 (size=303900) 2024-11-13T20:07:06,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742389_1565 (size=303900) 2024-11-13T20:07:06,673 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:07:06,673 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T20:07:06,744 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0010_000001 (auth:SIMPLE) from 127.0.0.1:49548 2024-11-13T20:07:08,854 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T20:07:10,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:10,607 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-13T20:07:14,797 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0010_000001 (auth:SIMPLE) from 127.0.0.1:57330 2024-11-13T20:07:15,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742390_1566 (size=349574) 2024-11-13T20:07:15,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742390_1566 (size=349574) 2024-11-13T20:07:15,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742390_1566 (size=349574) 2024-11-13T20:07:17,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0010_000001 (auth:SIMPLE) from 127.0.0.1:58742 2024-11-13T20:07:23,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742391_1567 (size=15525) 2024-11-13T20:07:23,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742391_1567 (size=15525) 2024-11-13T20:07:23,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742391_1567 (size=15525) 2024-11-13T20:07:23,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742392_1568 (size=8171) 2024-11-13T20:07:23,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742392_1568 (size=8171) 2024-11-13T20:07:23,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742392_1568 (size=8171) 2024-11-13T20:07:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742393_1569 (size=5960) 2024-11-13T20:07:23,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742393_1569 (size=5960) 2024-11-13T20:07:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742393_1569 (size=5960) 2024-11-13T20:07:23,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742394_1570 (size=5101) 2024-11-13T20:07:23,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742394_1570 (size=5101) 2024-11-13T20:07:23,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742394_1570 (size=5101) 2024-11-13T20:07:23,905 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742395_1571 (size=17473) 2024-11-13T20:07:23,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742395_1571 (size=17473) 2024-11-13T20:07:23,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742395_1571 (size=17473) 2024-11-13T20:07:24,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742396_1572 (size=476) 2024-11-13T20:07:24,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742396_1572 (size=476) 2024-11-13T20:07:24,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742396_1572 (size=476) 2024-11-13T20:07:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742397_1573 (size=17473) 2024-11-13T20:07:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742397_1573 (size=17473) 2024-11-13T20:07:24,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742397_1573 (size=17473) 2024-11-13T20:07:24,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742398_1574 (size=349574) 2024-11-13T20:07:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742398_1574 (size=349574) 2024-11-13T20:07:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742398_1574 (size=349574) 2024-11-13T20:07:24,498 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0010_000001 (auth:SIMPLE) from 127.0.0.1:38754 2024-11-13T20:07:25,913 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T20:07:25,913 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-13T20:07:25,922 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:25,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T20:07:25,923 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T20:07:25,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:25,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-13T20:07:25,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-13T20:07:25,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_113966349_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:25,924 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-13T20:07:25,924 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/export-test/export-1731528423995/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-13T20:07:25,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:25,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:25,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T20:07:25,935 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528445935"}]},"ts":"1731528445935"} 2024-11-13T20:07:25,937 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-13T20:07:25,937 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-13T20:07:25,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-13T20:07:25,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, UNASSIGN}] 2024-11-13T20:07:25,940 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, UNASSIGN 2024-11-13T20:07:25,941 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, UNASSIGN 2024-11-13T20:07:25,942 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=a7a1474ecf08e9b99b55f26e2a02ebc8, regionState=CLOSING, regionLocation=54d0784d1943,33333,1731528161197 2024-11-13T20:07:25,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=ae53b59159ae889879a4e0904ce57f75, regionState=CLOSING, regionLocation=54d0784d1943,36439,1731528160987 2024-11-13T20:07:25,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, UNASSIGN because future has completed 2024-11-13T20:07:25,944 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:07:25,944 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197}] 2024-11-13T20:07:25,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, UNASSIGN because future has completed 2024-11-13T20:07:25,945 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T20:07:25,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae53b59159ae889879a4e0904ce57f75, 
server=54d0784d1943,36439,1731528160987}] 2024-11-13T20:07:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T20:07:26,097 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing a7a1474ecf08e9b99b55f26e2a02ebc8, disabling compactions & flushes 2024-11-13T20:07:26,098 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. after waiting 0 ms 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:26,098 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing ae53b59159ae889879a4e0904ce57f75, disabling compactions & flushes 2024-11-13T20:07:26,098 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
after waiting 0 ms 2024-11-13T20:07:26,098 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 2024-11-13T20:07:26,102 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:07:26,102 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T20:07:26,103 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:07:26,103 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8. 2024-11-13T20:07:26,103 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for a7a1474ecf08e9b99b55f26e2a02ebc8: Waiting for close lock at 1731528446098Running coprocessor pre-close hooks at 1731528446098Disabling compacts and flushes for region at 1731528446098Disabling writes for close at 1731528446098Writing region close event to WAL at 1731528446099 (+1 ms)Running coprocessor post-close hooks at 1731528446103 (+4 ms)Closed at 1731528446103 2024-11-13T20:07:26,103 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:07:26,103 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75. 
2024-11-13T20:07:26,103 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for ae53b59159ae889879a4e0904ce57f75: Waiting for close lock at 1731528446098Running coprocessor pre-close hooks at 1731528446098Disabling compacts and flushes for region at 1731528446098Disabling writes for close at 1731528446098Writing region close event to WAL at 1731528446099 (+1 ms)Running coprocessor post-close hooks at 1731528446103 (+4 ms)Closed at 1731528446103 2024-11-13T20:07:26,105 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:26,105 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=a7a1474ecf08e9b99b55f26e2a02ebc8, regionState=CLOSED 2024-11-13T20:07:26,106 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:26,106 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=ae53b59159ae889879a4e0904ce57f75, regionState=CLOSED 2024-11-13T20:07:26,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197 because future has completed 2024-11-13T20:07:26,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae53b59159ae889879a4e0904ce57f75, server=54d0784d1943,36439,1731528160987 because future has completed 2024-11-13T20:07:26,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-13T20:07:26,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure a7a1474ecf08e9b99b55f26e2a02ebc8, server=54d0784d1943,33333,1731528161197 in 164 msec 2024-11-13T20:07:26,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=206 2024-11-13T20:07:26,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a7a1474ecf08e9b99b55f26e2a02ebc8, UNASSIGN in 171 msec 2024-11-13T20:07:26,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure ae53b59159ae889879a4e0904ce57f75, server=54d0784d1943,36439,1731528160987 in 164 msec 2024-11-13T20:07:26,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=206, resume processing ppid=205 2024-11-13T20:07:26,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae53b59159ae889879a4e0904ce57f75, UNASSIGN in 172 msec 2024-11-13T20:07:26,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-13T20:07:26,115 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 176 msec 2024-11-13T20:07:26,119 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731528446118"}]},"ts":"1731528446118"} 2024-11-13T20:07:26,120 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-13T20:07:26,120 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-13T20:07:26,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 189 msec 2024-11-13T20:07:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T20:07:26,255 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T20:07:26,256 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,258 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,258 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,260 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36439 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,262 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:26,262 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:26,263 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/recovered.edits] 2024-11-13T20:07:26,263 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf, FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/recovered.edits] 2024-11-13T20:07:26,267 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/cf/9d9e90659d124dc4bc052734083faed7 2024-11-13T20:07:26,267 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/cf/72525a3873164a7db73cfc81f6929ffe 2024-11-13T20:07:26,270 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8/recovered.edits/9.seqid 2024-11-13T20:07:26,270 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/recovered.edits/9.seqid to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75/recovered.edits/9.seqid 2024-11-13T20:07:26,271 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:26,271 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:26,271 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-13T20:07:26,271 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-13T20:07:26,272 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-13T20:07:26,275 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411131a92e42fd3364341b359a3b01e0cc202_a7a1474ecf08e9b99b55f26e2a02ebc8 2024-11-13T20:07:26,276 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 to hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411132f088e3b9dd24b018382e77022ff1501_ae53b59159ae889879a4e0904ce57f75 2024-11-13T20:07:26,276 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-13T20:07:26,278 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,280 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-13T20:07:26,282 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-13T20:07:26,283 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,284 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-11-13T20:07:26,284 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528446284"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:26,284 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731528446284"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:26,289 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T20:07:26,289 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ae53b59159ae889879a4e0904ce57f75, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731528420812.ae53b59159ae889879a4e0904ce57f75.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a7a1474ecf08e9b99b55f26e2a02ebc8, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731528420812.a7a1474ecf08e9b99b55f26e2a02ebc8.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T20:07:26,290 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-13T20:07:26,290 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731528446290"}]},"ts":"9223372036854775807"} 2024-11-13T20:07:26,292 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-13T20:07:26,293 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 37 msec 2024-11-13T20:07:26,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T20:07:26,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T20:07:26,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T20:07:26,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:26,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T20:07:26,325 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data null 2024-11-13T20:07:26,325 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T20:07:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-13T20:07:26,326 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,326 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T20:07:26,329 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:26,329 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:26,329 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:26,330 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T20:07:26,335 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-13T20:07:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-13T20:07:26,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:26,363 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=795 (was 790) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7641 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:43234 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (346890841) connection to localhost/127.0.0.1:38107 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-412636266_1 at /127.0.0.1:43208 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 19561) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:49272 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38107 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_113966349_22 at /127.0.0.1:41798 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=772 (was 777), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1042 (was 1104), ProcessCount=18 (was 18), AvailableMemoryMB=1529 (was 2091) 2024-11-13T20:07:26,363 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-11-13T20:07:26,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-11-13T20:07:26,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79612663{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T20:07:26,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f055f38{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:07:26,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:07:26,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68343f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T20:07:26,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47180253{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:07:29,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000002/launch_container.sh] 2024-11-13T20:07:29,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000002/container_tokens] 2024-11-13T20:07:29,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-1_2/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000002/sysfs] 2024-11-13T20:07:30,579 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731528172678_0010_000001 (auth:SIMPLE) from 127.0.0.1:37362 2024-11-13T20:07:30,591 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000001/launch_container.sh] 2024-11-13T20:07:30,591 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000001/container_tokens] 2024-11-13T20:07:30,591 WARN [ContainersLauncher #2 
{}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1995071712/yarn-3987864464/MiniMRCluster_1995071712-localDir-nm-0_0/usercache/jenkins/appcache/application_1731528172678_0010/container_1731528172678_0010_01_000001/sysfs] 2024-11-13T20:07:30,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T20:07:31,648 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:07:38,854 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:07:40,411 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 02ff9aeb72cf9b21fd2a7dfcab36dd97, had cached 0 bytes from a total of 5688 2024-11-13T20:07:40,412 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b2c0df47f245978cdca563d59f9f3349, had cached 0 bytes from a total of 14861 2024-11-13T20:07:41,110 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T20:07:41,225 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T20:07:41,301 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T20:07:41,404 DEBUG [master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-13T20:07:41,406 DEBUG [master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-13T20:07:43,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6002d397{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T20:07:43,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f49b64c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:07:43,428 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:07:43,428 INFO [Time-limited test {}] 
handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e3f4c0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T20:07:43,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57fdba5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:07:43,465 INFO [regionserver/54d0784d1943:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T20:07:43,467 INFO [regionserver/54d0784d1943:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T20:07:43,498 INFO [regionserver/54d0784d1943:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T20:07:44,507 INFO [regionserver/54d0784d1943:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 142366 ms 2024-11-13T20:07:48,412 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-13T20:07:48,419 DEBUG [master/54d0784d1943:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T20:07:48,422 DEBUG [master/54d0784d1943:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-13T20:07:48,423 INFO [master/54d0784d1943:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-13T20:07:48,423 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
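The FsDatasetAsyncDiskServiceFixer entry above downgrades a reflective failure to DEBUG: the `threadGroup` field it tries to read no longer exists on newer Hadoop versions (see HBASE-27595 in the message). A minimal sketch of that guarded reflective access, assuming a generic target object rather than the actual HBase fixer code:

```java
import java.lang.reflect.Field;

public class ReflectiveFieldAccess {
    // Try to read a private field by name; if the field was removed in a
    // newer library version, log a note and continue instead of failing.
    static Object readFieldOrNull(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName);
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException e) {
            System.out.println("NoSuchFieldException: " + fieldName
                + "; the field may not exist in this library version");
            return null;
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }
}
```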
2024-11-13T20:07:48,423 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {54d0784d1943=0} racks are {/default-rack=0} 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T20:07:48,424 INFO [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T20:07:48,424 INFO [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T20:07:48,424 INFO [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T20:07:48,424 DEBUG [master/54d0784d1943:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=3, number of hosts=1, number of racks=1 2024-11-13T20:07:48,429 INFO [master/54d0784d1943:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.013994543557037947 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8351403620626563, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8637972257617508, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T20:07:48,429 DEBUG [master/54d0784d1943:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-13T20:07:49,723 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:07:57,429 INFO [regionserver/54d0784d1943:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 
because f7446bb3f02b60d610e520d40f8cc773/l has an old edit so flush to free WALs after random delay 140041 ms 2024-11-13T20:08:00,439 ERROR [Thread[Thread-394,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T20:08:00,439 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54649c0f{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-13T20:08:00,440 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35ae0374{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:08:00,440 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:08:00,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@295ba610{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T20:08:00,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4adcba8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:08:00,446 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
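The StochasticLoadBalancer entry above skips balancing because the weighted average imbalance (about 0.014) is below `hbase.master.balancer.stochastic.minCostNeedBalance` (0.025), and it suggests lowering that threshold for more aggressive balancing. A hedged sketch of setting that property programmatically on an HBase configuration (the 0.01 value is an arbitrary example, not a recommendation; in practice the setting usually lives in the master's hbase-site.xml):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuning {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-site.xml etc.).
        Configuration conf = HBaseConfiguration.create();
        // Lower the minimum cost that triggers a balancing run, as the log
        // message above suggests for more aggressive balancing.
        conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.01");
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```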
2024-11-13T20:08:00,456 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-13T20:08:00,456 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-13T20:08:00,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741830_1006 (size=973994) 2024-11-13T20:08:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741830_1006 (size=973994) 2024-11-13T20:08:00,474 ERROR [Thread[Thread-417,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T20:08:00,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38a3a1ea{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-13T20:08:00,482 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@307e6889{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:08:00,482 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:08:00,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7979e0cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T20:08:00,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@153516ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:08:00,484 ERROR [Thread[Thread-375,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T20:08:00,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-13T20:08:00,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T20:08:00,485 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T20:08:00,485 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:08:00,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,485 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
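The `AsyncConnectionImpl(264): Call stack:` entry above logs the full stack at the moment the connection is closed, which shows which caller (here, the JUnit tearDownAfterClass) triggered the close. A generic sketch of capturing the current stack for such a debug message, not the HBase code itself:

```java
public class CallStackLogger {
    // Build a single-line "Call stack: at a.b.C.m(...) at ..." string from
    // the current thread's stack, similar in spirit to the entries above.
    static String currentCallStack() {
        StringBuilder sb = new StringBuilder("Call stack:");
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
            sb.append(" at ").append(frame);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(currentCallStack());
    }
}
```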
2024-11-13T20:08:00,485 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T20:08:00,486 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1850700870, stopped=false 2024-11-13T20:08:00,486 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,486 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-13T20:08:00,486 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=54d0784d1943,34357,1731528160092 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T20:08:00,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:08:00,492 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T20:08:00,493 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
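The ZKWatcher entries above show how shutdown propagates through ZooKeeper: the master deletes the `/hbase/running` znode, every region server's watcher receives the `NodeDeleted` event, and each then re-arms its watch ("Set watcher on znode that does not yet exist"). A minimal ZooKeeper-client sketch of that watch pattern (connection string and timeout are placeholders; this is not the HBase ZKWatcher implementation):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcher {
    public static void main(String[] args) throws Exception {
        String path = "/hbase/running";
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && path.equals(event.getPath())) {
                    System.out.println("Cluster shutdown requested: " + path + " deleted");
                }
            }
        };
        // exists() sets a watch whether or not the znode currently exists,
        // which is how a watch can sit "on a znode that does not yet exist".
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);
        zk.exists(path, watcher);
        Thread.sleep(Long.MAX_VALUE); // keep the process alive to receive events
    }
}
```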
2024-11-13T20:08:00,493 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:08:00,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '54d0784d1943,36439,1731528160987' ***** 2024-11-13T20:08:00,494 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:08:00,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T20:08:00,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '54d0784d1943,33333,1731528161197' ***** 2024-11-13T20:08:00,494 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,494 INFO [Time-limited test {}] 
regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T20:08:00,494 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '54d0784d1943,39533,1731528161273' ***** 2024-11-13T20:08:00,494 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:08:00,494 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T20:08:00,494 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T20:08:00,495 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T20:08:00,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T20:08:00,495 INFO [RS:1;54d0784d1943:33333 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T20:08:00,495 INFO [RS:0;54d0784d1943:36439 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T20:08:00,495 INFO [RS:1;54d0784d1943:33333 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T20:08:00,495 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T20:08:00,495 INFO [RS:0;54d0784d1943:36439 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T20:08:00,495 INFO [RS:2;54d0784d1943:39533 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T20:08:00,495 INFO [RS:2;54d0784d1943:39533 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T20:08:00,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T20:08:00,495 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T20:08:00,495 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(3091): Received CLOSE for b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:08:00,495 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(3091): Received CLOSE for 02ff9aeb72cf9b21fd2a7dfcab36dd97 2024-11-13T20:08:00,495 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(3091): Received CLOSE for f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(959): stopping server 54d0784d1943,33333,1731528161197 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;54d0784d1943:33333. 
2024-11-13T20:08:00,496 DEBUG [RS:1;54d0784d1943:33333 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:08:00,496 DEBUG [RS:1;54d0784d1943:33333 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T20:08:00,496 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f7446bb3f02b60d610e520d40f8cc773, disabling compactions & flushes 2024-11-13T20:08:00,496 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T20:08:00,496 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b2c0df47f245978cdca563d59f9f3349, disabling compactions & flushes 2024-11-13T20:08:00,496 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:08:00,496 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:08:00,496 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:08:00,496 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 
after waiting 0 ms 2024-11-13T20:08:00,496 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(959): stopping server 54d0784d1943,39533,1731528161273 2024-11-13T20:08:00,496 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. after waiting 0 ms 2024-11-13T20:08:00,496 INFO [RS:2;54d0784d1943:39533 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 02ff9aeb72cf9b21fd2a7dfcab36dd97, disabling compactions & flushes 2024-11-13T20:08:00,497 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f7446bb3f02b60d610e520d40f8cc773 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-13T20:08:00,497 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. after waiting 0 ms 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:08:00,497 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(959): stopping server 54d0784d1943,36439,1731528160987 2024-11-13T20:08:00,497 INFO [RS:0;54d0784d1943:36439 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T20:08:00,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:08:00,497 INFO [RS:0;54d0784d1943:36439 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;54d0784d1943:36439. 
2024-11-13T20:08:00,497 DEBUG [RS:0;54d0784d1943:36439 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:08:00,497 DEBUG [RS:0;54d0784d1943:36439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,498 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T20:08:00,498 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T20:08:00,498 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T20:08:00,498 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T20:08:00,498 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1325): Online Regions={f7446bb3f02b60d610e520d40f8cc773=hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773.} 2024-11-13T20:08:00,498 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T20:08:00,498 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T20:08:00,498 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T20:08:00,498 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 02ff9aeb72cf9b21fd2a7dfcab36dd97=testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97.} 2024-11-13T20:08:00,498 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, 
dataSize=69.66 KB heapSize=111.04 KB 2024-11-13T20:08:00,498 DEBUG [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1351): Waiting on f7446bb3f02b60d610e520d40f8cc773 2024-11-13T20:08:00,498 DEBUG [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1351): Waiting on 02ff9aeb72cf9b21fd2a7dfcab36dd97, 1588230740 2024-11-13T20:08:00,497 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:08:00,499 INFO [RS:2;54d0784d1943:39533 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;54d0784d1943:39533. 2024-11-13T20:08:00,499 DEBUG [RS:2;54d0784d1943:39533 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:08:00,499 DEBUG [RS:2;54d0784d1943:39533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,499 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T20:08:00,499 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1325): Online Regions={b2c0df47f245978cdca563d59f9f3349=testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349.} 2024-11-13T20:08:00,499 DEBUG [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1351): Waiting on b2c0df47f245978cdca563d59f9f3349 2024-11-13T20:08:00,511 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/02ff9aeb72cf9b21fd2a7dfcab36dd97/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:08:00,511 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,511 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 
2024-11-13T20:08:00,511 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 02ff9aeb72cf9b21fd2a7dfcab36dd97: Waiting for close lock at 1731528480497Running coprocessor pre-close hooks at 1731528480497Disabling compacts and flushes for region at 1731528480497Disabling writes for close at 1731528480497Writing region close event to WAL at 1731528480505 (+8 ms)Running coprocessor post-close hooks at 1731528480511 (+6 ms)Closed at 1731528480511 2024-11-13T20:08:00,512 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1731528324617.02ff9aeb72cf9b21fd2a7dfcab36dd97. 2024-11-13T20:08:00,512 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/default/testExportExpiredSnapshot/b2c0df47f245978cdca563d59f9f3349/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T20:08:00,512 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,512 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 2024-11-13T20:08:00,513 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b2c0df47f245978cdca563d59f9f3349: Waiting for close lock at 1731528480496Running coprocessor pre-close hooks at 1731528480496Disabling compacts and flushes for region at 1731528480496Disabling writes for close at 1731528480497 (+1 ms)Writing region close event to WAL at 1731528480506 (+9 ms)Running coprocessor post-close hooks at 1731528480512 (+6 ms)Closed at 1731528480512 2024-11-13T20:08:00,513 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349. 
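The "Region close journal" entries above record each step of the close with its timestamp and the delta from the previous step, e.g. "Writing region close event to WAL at ... (+8 ms)". A small illustrative sketch of accumulating that kind of step journal (not HBase's monitored-task classes):

```java
import java.util.ArrayList;
import java.util.List;

public class StepJournal {
    private final List<String> steps = new ArrayList<>();
    private long lastTimestamp = -1;

    // Record a named step with its wall-clock time and the (+N ms) delta
    // from the previous step, mirroring the close-journal format above.
    void record(String step) {
        long now = System.currentTimeMillis();
        String delta = lastTimestamp < 0 ? "" : " (+" + (now - lastTimestamp) + " ms)";
        steps.add(step + " at " + now + delta);
        lastTimestamp = now;
    }

    @Override
    public String toString() {
        return String.join("", steps);
    }

    public static void main(String[] args) throws InterruptedException {
        StepJournal journal = new StepJournal();
        journal.record("Waiting for close lock");
        Thread.sleep(8);
        journal.record("Writing region close event to WAL");
        journal.record("Closed");
        System.out.println(journal);
    }
}
```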
2024-11-13T20:08:00,525 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/.tmp/l/a49e3f5473be48d093c98088de2944ae is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1731528321411/DeleteFamily/seqid=0 2024-11-13T20:08:00,533 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/info/370a7238f44c4e18a0ecf22f41199ded is 173, key is testExportExpiredSnapshot,1,1731528324617.b2c0df47f245978cdca563d59f9f3349./info:regioninfo/1731528325453/Put/seqid=0 2024-11-13T20:08:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742399_1575 (size=5695) 2024-11-13T20:08:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742399_1575 (size=5695) 2024-11-13T20:08:00,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742399_1575 (size=5695) 2024-11-13T20:08:00,540 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/.tmp/l/a49e3f5473be48d093c98088de2944ae 2024-11-13T20:08:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742400_1576 (size=14362) 2024-11-13T20:08:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742400_1576 (size=14362) 2024-11-13T20:08:00,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742400_1576 (size=14362) 2024-11-13T20:08:00,543 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/info/370a7238f44c4e18a0ecf22f41199ded 2024-11-13T20:08:00,544 INFO [regionserver/54d0784d1943:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,546 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a49e3f5473be48d093c98088de2944ae 2024-11-13T20:08:00,547 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/.tmp/l/a49e3f5473be48d093c98088de2944ae as 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/l/a49e3f5473be48d093c98088de2944ae 2024-11-13T20:08:00,551 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a49e3f5473be48d093c98088de2944ae 2024-11-13T20:08:00,551 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/l/a49e3f5473be48d093c98088de2944ae, entries=12, sequenceid=27, filesize=5.6 K 2024-11-13T20:08:00,552 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for f7446bb3f02b60d610e520d40f8cc773 in 56ms, sequenceid=27, compaction requested=false 2024-11-13T20:08:00,555 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/acl/f7446bb3f02b60d610e520d40f8cc773/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-13T20:08:00,556 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,556 INFO [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:08:00,556 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f7446bb3f02b60d610e520d40f8cc773: Waiting for close lock at 1731528480496Running coprocessor pre-close hooks at 1731528480496Disabling compacts and flushes for region at 1731528480496Disabling writes for close at 1731528480496Obtaining lock to block concurrent updates at 1731528480497 (+1 ms)Preparing flush snapshotting stores in f7446bb3f02b60d610e520d40f8cc773 at 1731528480497Finished memstore snapshotting hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1731528480497Flushing stores of hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 
at 1731528480498 (+1 ms)Flushing f7446bb3f02b60d610e520d40f8cc773/l: creating writer at 1731528480499 (+1 ms)Flushing f7446bb3f02b60d610e520d40f8cc773/l: appending metadata at 1731528480524 (+25 ms)Flushing f7446bb3f02b60d610e520d40f8cc773/l: closing flushed file at 1731528480524Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@deb6472: reopening flushed file at 1731528480546 (+22 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for f7446bb3f02b60d610e520d40f8cc773 in 56ms, sequenceid=27, compaction requested=false at 1731528480552 (+6 ms)Writing region close event to WAL at 1731528480553 (+1 ms)Running coprocessor post-close hooks at 1731528480556 (+3 ms)Closed at 1731528480556 2024-11-13T20:08:00,556 DEBUG [RS_CLOSE_REGION-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1731528164517.f7446bb3f02b60d610e520d40f8cc773. 2024-11-13T20:08:00,562 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/ns/083524bc506f46268f2c1ce25b857db2 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f./ns:/1731528321449/DeleteFamily/seqid=0 2024-11-13T20:08:00,565 INFO [regionserver/54d0784d1943:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742401_1577 (size=7779) 2024-11-13T20:08:00,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742401_1577 (size=7779) 2024-11-13T20:08:00,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742401_1577 (size=7779) 2024-11-13T20:08:00,567 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/ns/083524bc506f46268f2c1ce25b857db2 2024-11-13T20:08:00,576 INFO [regionserver/54d0784d1943:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,584 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/rep_barrier/3b9f761095de4197bcd71375f227b5c8 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f./rep_barrier:/1731528321449/DeleteFamily/seqid=0 2024-11-13T20:08:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742402_1578 (size=8005) 2024-11-13T20:08:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742402_1578 (size=8005) 2024-11-13T20:08:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742402_1578 
(size=8005) 2024-11-13T20:08:00,590 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/rep_barrier/3b9f761095de4197bcd71375f227b5c8 2024-11-13T20:08:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-13T20:08:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-13T20:08:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-13T20:08:00,609 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/table/320b857294294b2a8adad0a3e7c0ce02 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731528301848.2bb4bf5ae8a4b752b723f73eda35b84f./table:/1731528321449/DeleteFamily/seqid=0 2024-11-13T20:08:00,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742403_1579 (size=8758) 2024-11-13T20:08:00,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742403_1579 (size=8758) 2024-11-13T20:08:00,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742403_1579 (size=8758) 2024-11-13T20:08:00,615 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/table/320b857294294b2a8adad0a3e7c0ce02 2024-11-13T20:08:00,619 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/info/370a7238f44c4e18a0ecf22f41199ded as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/info/370a7238f44c4e18a0ecf22f41199ded 2024-11-13T20:08:00,623 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/info/370a7238f44c4e18a0ecf22f41199ded, entries=74, sequenceid=199, filesize=14.0 K 2024-11-13T20:08:00,624 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/ns/083524bc506f46268f2c1ce25b857db2 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/ns/083524bc506f46268f2c1ce25b857db2 2024-11-13T20:08:00,628 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/ns/083524bc506f46268f2c1ce25b857db2, entries=23, sequenceid=199, filesize=7.6 K 2024-11-13T20:08:00,629 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/rep_barrier/3b9f761095de4197bcd71375f227b5c8 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/rep_barrier/3b9f761095de4197bcd71375f227b5c8 2024-11-13T20:08:00,633 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/rep_barrier/3b9f761095de4197bcd71375f227b5c8, entries=21, sequenceid=199, filesize=7.8 K 2024-11-13T20:08:00,633 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/.tmp/table/320b857294294b2a8adad0a3e7c0ce02 as hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/table/320b857294294b2a8adad0a3e7c0ce02 2024-11-13T20:08:00,638 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/table/320b857294294b2a8adad0a3e7c0ce02, entries=36, sequenceid=199, filesize=8.6 K 2024-11-13T20:08:00,639 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=199, compaction requested=false 2024-11-13T20:08:00,643 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-13T20:08:00,643 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:00,643 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T20:08:00,643 INFO [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T20:08:00,643 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731528480497Running coprocessor pre-close hooks at 1731528480497Disabling compacts and flushes for region at 1731528480497Disabling writes for close at 1731528480498 (+1 ms)Obtaining lock to block concurrent updates at 1731528480498Preparing flush snapshotting stores in 1588230740 at 1731528480498Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1731528480499 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731528480499Flushing 1588230740/info: creating writer at 1731528480499Flushing 1588230740/info: appending metadata at 1731528480532 (+33 ms)Flushing 1588230740/info: closing flushed file at 1731528480532Flushing 1588230740/ns: creating writer at 1731528480547 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731528480561 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731528480561Flushing 1588230740/rep_barrier: creating writer at 1731528480570 (+9 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731528480584 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731528480584Flushing 1588230740/table: creating writer at 1731528480594 (+10 ms)Flushing 1588230740/table: appending metadata at 1731528480609 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731528480609Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e84fdd9: reopening flushed file at 1731528480619 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e9811f2: reopening flushed file at 1731528480624 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d9fa2b4: reopening flushed file at 1731528480628 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e2b419d: reopening flushed file at 1731528480633 (+5 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=199, compaction requested=false at 1731528480639 (+6 ms)Writing region close event to WAL at 1731528480640 (+1 ms)Running coprocessor post-close hooks at 1731528480643 (+3 ms)Closed at 1731528480643 2024-11-13T20:08:00,644 DEBUG [RS_CLOSE_META-regionserver/54d0784d1943:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T20:08:00,699 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(976): stopping server 54d0784d1943,33333,1731528161197; all regions closed. 2024-11-13T20:08:00,699 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(976): stopping server 54d0784d1943,36439,1731528160987; all regions closed. 2024-11-13T20:08:00,699 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(976): stopping server 54d0784d1943,39533,1731528161273; all regions closed. 
2024-11-13T20:08:00,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741836_1012 (size=81723) 2024-11-13T20:08:00,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741835_1011 (size=9044) 2024-11-13T20:08:00,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741836_1012 (size=81723) 2024-11-13T20:08:00,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741835_1011 (size=9044) 2024-11-13T20:08:00,711 DEBUG [RS:2;54d0784d1943:39533 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs 2024-11-13T20:08:00,711 INFO [RS:2;54d0784d1943:39533 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 54d0784d1943%2C39533%2C1731528161273:(num 1731528163637) 2024-11-13T20:08:00,711 DEBUG [RS:2;54d0784d1943:39533 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,711 INFO [RS:2;54d0784d1943:39533 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,711 INFO [RS:2;54d0784d1943:39533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] hbase.ChoreService(370): Chore service for: regionserver/54d0784d1943:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T20:08:00,712 INFO [regionserver/54d0784d1943:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T20:08:00,712 INFO [RS:2;54d0784d1943:39533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39533 2024-11-13T20:08:00,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741834_1010 (size=15750) 2024-11-13T20:08:00,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741834_1010 (size=15750) 2024-11-13T20:08:00,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/54d0784d1943,39533,1731528161273 2024-11-13T20:08:00,717 INFO [RS:2;54d0784d1943:39533 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T20:08:00,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T20:08:00,720 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [54d0784d1943,39533,1731528161273] 2024-11-13T20:08:00,725 DEBUG [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs 2024-11-13T20:08:00,725 INFO [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 54d0784d1943%2C33333%2C1731528161197.meta:.meta(num 1731528164141) 2024-11-13T20:08:00,729 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/54d0784d1943,39533,1731528161273 already deleted, retry=false 2024-11-13T20:08:00,729 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 54d0784d1943,39533,1731528161273 expired; onlineServers=2 2024-11-13T20:08:00,739 DEBUG [RS:0;54d0784d1943:36439 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs 2024-11-13T20:08:00,739 INFO [RS:0;54d0784d1943:36439 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 54d0784d1943%2C36439%2C1731528160987:(num 1731528163630) 2024-11-13T20:08:00,739 DEBUG [RS:0;54d0784d1943:36439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,739 INFO [RS:0;54d0784d1943:36439 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,739 INFO [RS:0;54d0784d1943:36439 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T20:08:00,739 INFO [RS:0;54d0784d1943:36439 {}] hbase.ChoreService(370): Chore service for: regionserver/54d0784d1943:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T20:08:00,740 INFO [RS:0;54d0784d1943:36439 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T20:08:00,740 INFO [RS:0;54d0784d1943:36439 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-13T20:08:00,740 INFO [RS:0;54d0784d1943:36439 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T20:08:00,740 INFO [RS:0;54d0784d1943:36439 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T20:08:00,740 INFO [regionserver/54d0784d1943:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T20:08:00,740 INFO [RS:0;54d0784d1943:36439 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36439 2024-11-13T20:08:00,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/54d0784d1943,36439,1731528160987 2024-11-13T20:08:00,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T20:08:00,743 INFO [RS:0;54d0784d1943:36439 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T20:08:00,744 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [54d0784d1943,36439,1731528160987] 2024-11-13T20:08:00,746 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/54d0784d1943,36439,1731528160987 already deleted, retry=false 2024-11-13T20:08:00,746 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 54d0784d1943,36439,1731528160987 expired; onlineServers=1 2024-11-13T20:08:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073741833_1009 (size=12159) 2024-11-13T20:08:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073741833_1009 (size=12159) 2024-11-13T20:08:00,759 DEBUG [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/oldWALs 2024-11-13T20:08:00,759 INFO [RS:1;54d0784d1943:33333 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 54d0784d1943%2C33333%2C1731528161197:(num 1731528163629) 2024-11-13T20:08:00,759 DEBUG [RS:1;54d0784d1943:33333 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T20:08:00,759 INFO [RS:1;54d0784d1943:33333 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T20:08:00,759 INFO [RS:1;54d0784d1943:33333 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T20:08:00,759 INFO [RS:1;54d0784d1943:33333 {}] hbase.ChoreService(370): Chore service for: regionserver/54d0784d1943:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T20:08:00,759 INFO [RS:1;54d0784d1943:33333 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T20:08:00,759 INFO [regionserver/54d0784d1943:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T20:08:00,760 INFO [RS:1;54d0784d1943:33333 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33333 2024-11-13T20:08:00,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/54d0784d1943,33333,1731528161197 2024-11-13T20:08:00,762 INFO [RS:1;54d0784d1943:33333 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T20:08:00,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T20:08:00,766 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [54d0784d1943,33333,1731528161197] 2024-11-13T20:08:00,768 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/54d0784d1943,33333,1731528161197 already deleted, retry=false 2024-11-13T20:08:00,768 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 54d0784d1943,33333,1731528161197 expired; onlineServers=0 2024-11-13T20:08:00,768 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '54d0784d1943,34357,1731528160092' ***** 2024-11-13T20:08:00,768 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T20:08:00,768 INFO [M:0;54d0784d1943:34357 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T20:08:00,768 INFO [M:0;54d0784d1943:34357 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T20:08:00,768 DEBUG [M:0;54d0784d1943:34357 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T20:08:00,769 DEBUG [M:0;54d0784d1943:34357 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T20:08:00,769 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T20:08:00,769 DEBUG [master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.small.0-1731528162950 {}] cleaner.HFileCleaner(306): Exit Thread[master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.small.0-1731528162950,5,FailOnTimeoutGroup] 2024-11-13T20:08:00,769 DEBUG [master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.large.0-1731528162947 {}] cleaner.HFileCleaner(306): Exit Thread[master/54d0784d1943:0:becomeActiveMaster-HFileCleaner.large.0-1731528162947,5,FailOnTimeoutGroup] 2024-11-13T20:08:00,769 INFO [M:0;54d0784d1943:34357 {}] hbase.ChoreService(370): Chore service for: master/54d0784d1943:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T20:08:00,769 INFO [M:0;54d0784d1943:34357 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T20:08:00,769 DEBUG [M:0;54d0784d1943:34357 {}] master.HMaster(1795): Stopping service threads 2024-11-13T20:08:00,769 INFO [M:0;54d0784d1943:34357 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T20:08:00,770 INFO [M:0;54d0784d1943:34357 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T20:08:00,770 INFO [M:0;54d0784d1943:34357 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T20:08:00,770 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T20:08:00,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T20:08:00,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T20:08:00,777 DEBUG [M:0;54d0784d1943:34357 {}] zookeeper.ZKUtil(347): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T20:08:00,778 WARN [M:0;54d0784d1943:34357 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T20:08:00,780 INFO [M:0;54d0784d1943:34357 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/.lastflushedseqids 2024-11-13T20:08:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39533-0x100edb1e38f0003, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,824 INFO [RS:2;54d0784d1943:39533 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T20:08:00,825 INFO [RS:2;54d0784d1943:39533 {}] regionserver.HRegionServer(1031): Exiting; stopping=54d0784d1943,39533,1731528161273; zookeeper connection closed. 
2024-11-13T20:08:00,832 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6fdac841 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6fdac841 2024-11-13T20:08:00,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38925 is added to blk_1073742404_1580 (size=329) 2024-11-13T20:08:00,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42549 is added to blk_1073742404_1580 (size=329) 2024-11-13T20:08:00,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073742404_1580 (size=329) 2024-11-13T20:08:00,836 INFO [M:0;54d0784d1943:34357 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T20:08:00,836 INFO [M:0;54d0784d1943:34357 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T20:08:00,836 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T20:08:00,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,845 INFO [RS:0;54d0784d1943:36439 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T20:08:00,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36439-0x100edb1e38f0001, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,845 INFO [RS:0;54d0784d1943:36439 {}] regionserver.HRegionServer(1031): Exiting; stopping=54d0784d1943,36439,1731528160987; zookeeper connection closed. 2024-11-13T20:08:00,846 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@715c30fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@715c30fe 2024-11-13T20:08:00,857 INFO [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T20:08:00,857 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T20:08:00,857 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T20:08:00,858 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T20:08:00,858 INFO [M:0;54d0784d1943:34357 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=828.69 KB heapSize=993.33 KB 2024-11-13T20:08:00,858 ERROR [AsyncFSWAL-0-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T20:08:00,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,867 INFO [RS:1;54d0784d1943:33333 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T20:08:00,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33333-0x100edb1e38f0002, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:08:00,867 INFO [RS:1;54d0784d1943:33333 {}] regionserver.HRegionServer(1031): Exiting; stopping=54d0784d1943,33333,1731528161197; zookeeper connection closed. 
2024-11-13T20:08:00,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4917fc6c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4917fc6c 2024-11-13T20:08:00,868 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-13T20:08:02,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741834_1010 (size=15750) 2024-11-13T20:08:02,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741836_1012 (size=81723) 2024-11-13T20:08:02,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741835_1011 (size=9044) 2024-11-13T20:08:02,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741830_1006 (size=973994) 2024-11-13T20:08:05,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33293 is added to blk_1073741833_1009 (size=12159) 2024-11-13T20:08:06,028 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:08:08,854 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:08:10,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T20:08:10,607 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T20:08:10,608 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T20:08:16,109 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T20:08:38,855 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;54d0784d1943:34357 234 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 60 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@295fd76d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c48bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3876 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.CountDownLatch$Sync@5c567943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 9565 Waited count: 10088 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2b548afb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@62a08c9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 770 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@775a1916-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3191 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47d31291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43181): State: TIMED_WAITING Blocked count: 1 Waited 
count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 37460 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ae1dccf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43181): State: TIMED_WAITING Blocked count: 45 Waited count: 2263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2255 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43181): State: TIMED_WAITING Blocked count: 51 Waited count: 2247 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43181): State: TIMED_WAITING Blocked count: 46 Waited count: 2263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1545047682)): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp716438074-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp716438074-87-acceptor-0@66e3e3ed-ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp716438074-88): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp716438074-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2043cf27-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5b2ac2cc): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 274 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@650a7060 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1306 Waited count: 1436 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@62b9b04d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 388 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1330250818-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1330250818-121): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1330250818-122-acceptor-0@4cf31fea-ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1330250818-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-1b631abe-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins): State: TIMED_WAITING Blocked count: 1391 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 0 Waited count: 1919 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a54831e): State: TIMED_WAITING Blocked count: 0 Waited count: 766 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 41717): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 272 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f625b80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1300 Waited count: 1449 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2585690e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1441691316-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1441691316-158-acceptor-0@45e4f4d7-ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:39501}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1441691316-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1441691316-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-6317a7da-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2)): State: TIMED_WAITING 
Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@570ff40a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (java.util.concurrent.ThreadPoolExecutor$Worker@1e650b0b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@69e47042): State: TIMED_WAITING Blocked count: 0 Waited count: 765 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35411): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 1 Waited count: 279 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbce19f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 
(BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1298 Waited count: 1442 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2d846603): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4c281f18[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62658): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bcd9ce8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:62658):): State: WAITING Blocked count: 0 Waited count: 455 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2135e78f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 487 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@560eef75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:43181): State: TIMED_WAITING Blocked count: 10 Waited count: 393 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3555c440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 3 Waited count: 273 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:62658)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0661a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@793cd5a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 84 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62d07906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 201 Waited count: 744 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b8c0422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 16 Waited count: 159 Waiting on java.util.concurrent.Semaphore$NonfairSync@62ae6780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357): State: WAITING Blocked count: 100 Waited count: 7833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d20178d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@37dfe3cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@26dc00d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5bbde7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@d798786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45f0fc59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 19 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;54d0784d1943:34357): State: TIMED_WAITING Blocked count: 11 Waited count: 3195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1079/0x00007ff6ecf6f948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2356ff0d): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3781 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 47 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 390 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 79 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37752 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 36 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 446 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@34147c36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 
(regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35f5ddb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77c26124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e97dc4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 488 (LeaseRenewer:jenkins.hfs.1@localhost:43181): State: TIMED_WAITING Blocked count: 10 Waited count: 395 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 492 
(LeaseRenewer:jenkins.hfs.0@localhost:43181): State: TIMED_WAITING Blocked count: 10 Waited count: 395 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 495 (LeaseRenewer:jenkins.hfs.2@localhost:43181): State: TIMED_WAITING Blocked count: 10 Waited count: 396 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 499 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (region-location-0): State: WAITING Blocked count: 12 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 37570 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (region-location-1): State: WAITING Blocked count: 8 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 455 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 58 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@563cd59a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 545 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1494 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40dc79e9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2530 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2531 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 413 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4346 (java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56680922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4771 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited 
count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4773 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 7256 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8941 (AsyncFSWAL-1-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581ae352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8942 (java.util.concurrent.ThreadPoolExecutor$Worker@4bf9cc2c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8943 (java.util.concurrent.ThreadPoolExecutor$Worker@4d7a8c08[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8946 (java.util.concurrent.ThreadPoolExecutor$Worker@29f58b80[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8947 (java.util.concurrent.ThreadPoolExecutor$Worker@347f2a97[State = -1, empty queue]):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8953 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-13T20:09:08,855 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details.
2024-11-13T20:09:38,855 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, see HBASE-27595 for details.
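
The per-thread blocks in these dumps (State, Blocked count, Waited count, Stack) correspond directly to what java.lang.management exposes; the "Time-limited test" thread in the dump below is sampling all threads through sun.management.ThreadImpl.getThreadInfo, called from org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo while HBaseTestingUtil waits for the mini cluster to shut down. The snippet that follows is a minimal, illustrative dumper built on the same ThreadMXBean API. It is not the HBase ReflectionUtils/Threads implementation; the class name PeriodicThreadDumper and the exact output layout are assumptions made for this sketch.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class PeriodicThreadDumper {

  // Print one dump in roughly the same shape as the blocks in this log:
  // a title line, the live-thread count, then each thread's state,
  // blocked/waited counts and stack trace.
  static void dumpThreads(String title) {
    ThreadMXBean bean = ManagementFactory.getThreadMXBean();
    ThreadInfo[] infos = bean.getThreadInfo(bean.getAllThreadIds(), Integer.MAX_VALUE);
    System.out.println("Process Thread Dump: " + title);
    System.out.println(bean.getThreadCount() + " active threads");
    for (ThreadInfo info : infos) {
      if (info == null) {
        continue; // thread exited between getAllThreadIds() and getThreadInfo()
      }
      System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
      System.out.println("  State: " + info.getThreadState());
      System.out.println("  Blocked count: " + info.getBlockedCount());
      System.out.println("  Waited count: " + info.getWaitedCount());
      System.out.println("  Stack:");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // "Automatic Stack Trace every 60 seconds": dump, sleep, repeat.
    while (true) {
      dumpThreads("Automatic Stack Trace every 60 seconds");
      Thread.sleep(60_000L);
    }
  }
}

Run standalone, it prints a block shaped like the ones in this log once a minute until the JVM exits.
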
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;54d0784d1943:34357
225 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 60
  Waited count: 16
  Waiting on java.lang.ref.ReferenceQueue$Lock@295fd76d
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 16
  Waited count: 20
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: WAITING
  Blocked count: 0
  Waited count: 20
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c48bea2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4476 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 45 Waiting on java.util.concurrent.CountDownLatch$Sync@52320bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 9565 Waited count: 10089 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2b548afb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@62a08c9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 890 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@775a1916-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3191 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47d31291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43181): State: TIMED_WAITING Blocked count: 1 Waited 
count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 43422 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ae1dccf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43181): State: TIMED_WAITING Blocked count: 45 Waited count: 2324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2316 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43181): State: TIMED_WAITING Blocked count: 51 Waited count: 2308 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43181): State: TIMED_WAITING Blocked count: 46 Waited count: 2324 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1545047682)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp716438074-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp716438074-87-acceptor-0@66e3e3ed-ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp716438074-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp716438074-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2043cf27-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5b2ac2cc): State: TIMED_WAITING Blocked count: 0 Waited count: 887 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 294 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@650a7060 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1326 Waited count: 1476 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@62b9b04d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1330250818-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1330250818-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1330250818-122-acceptor-0@4cf31fea-ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1330250818-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-1b631abe-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins): State: TIMED_WAITING Blocked count: 1451 Waited count: 1452 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 0 Waited count: 1979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a54831e): State: TIMED_WAITING Blocked count: 0 Waited count: 886 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 41717): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 292 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f625b80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1320 Waited count: 1489 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2585690e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1441691316-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1441691316-158-acceptor-0@45e4f4d7-ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:39501}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1441691316-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1441691316-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-6317a7da-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2)): State: TIMED_WAITING 
Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@570ff40a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (java.util.concurrent.ThreadPoolExecutor$Worker@1e650b0b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@69e47042): State: TIMED_WAITING Blocked count: 0 Waited count: 885 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35411): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 1 Waited count: 299 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbce19f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 
(BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1318 Waited count: 1482 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2d846603): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 444 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4c281f18[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62658): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 372 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bcd9ce8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:62658):): State: WAITING Blocked count: 0 Waited count: 460 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2135e78f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 492 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@560eef75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3555c440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 3 Waited count: 301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:62658)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0661a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@793cd5a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62d07906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 201 Waited count: 744 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b8c0422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 16 Waited count: 159 Waiting on java.util.concurrent.Semaphore$NonfairSync@62ae6780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357): State: WAITING Blocked count: 100 Waited count: 7833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d20178d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@37dfe3cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@26dc00d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5bbde7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@d798786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45f0fc59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 19 
Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;54d0784d1943:34357): State: TIMED_WAITING Blocked count: 11 Waited count: 3195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1079/0x00007ff6ecf6f948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2356ff0d): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4380 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 47 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 390 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 79 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@340edf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43754 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 36 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 446 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@34147c36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35f5ddb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77c26124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e97dc4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 499 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (region-location-0): State: WAITING Blocked count: 12 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 43572 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (region-location-1): State: WAITING Blocked count: 8 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 58 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@563cd59a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 545 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1494 (Container metrics unregistration): State: WAITING 
Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40dc79e9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2530 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2531 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 413 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4346 (java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56680922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4771 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4773 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 7256 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8941 (AsyncFSWAL-1-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581ae352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8953 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-13T20:10:08,855 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-13T20:10:38,856 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;54d0784d1943:34357 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 60 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@295fd76d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 23 Waiting on
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c48bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@69dd4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) 
java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 9565 Waited count: 10090 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2b548afb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@62a08c9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 1010 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@775a1916-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3191 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47d31291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43181): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 49384 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ae1dccf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43181): State: TIMED_WAITING Blocked count: 45 Waited count: 2385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43181): State: TIMED_WAITING Blocked count: 
53 Waited count: 2375 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43181): State: TIMED_WAITING Blocked count: 51 Waited count: 2370 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43181): State: TIMED_WAITING Blocked count: 46 Waited count: 2385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: 
TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1545047682)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp716438074-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp716438074-87-acceptor-0@66e3e3ed-ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp716438074-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp716438074-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2043cf27-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5b2ac2cc): State: TIMED_WAITING Blocked count: 0 Waited count: 1007 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@650a7060 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1346 Waited count: 1516 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@62b9b04d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 506 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1330250818-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1330250818-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1330250818-122-acceptor-0@4cf31fea-ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1330250818-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-1b631abe-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins): State: TIMED_WAITING Blocked count: 1511 Waited count: 1512 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 0 Waited count: 2039 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a54831e): State: TIMED_WAITING Blocked count: 0 Waited count: 1006 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 41717): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 312 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f625b80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1340 Waited count: 1529 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2585690e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1441691316-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1441691316-158-acceptor-0@45e4f4d7-ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:39501}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1441691316-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1441691316-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-6317a7da-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1)): State: 
TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@570ff40a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (java.util.concurrent.ThreadPoolExecutor$Worker@1e650b0b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@69e47042): State: TIMED_WAITING Blocked count: 0 Waited count: 1005 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35411): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 1 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbce19f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1338 Waited count: 1522 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2d846603): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 504 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4c281f18[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62658): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bcd9ce8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:62658):): State: WAITING Blocked count: 0 Waited count: 464 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2135e78f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 496 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@560eef75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3555c440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 3 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:62658)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0661a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@793cd5a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 85 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62d07906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 201 Waited count: 744 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b8c0422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 16 Waited count: 159 Waiting on java.util.concurrent.Semaphore$NonfairSync@62ae6780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357): State: WAITING Blocked count: 100 Waited count: 7833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d20178d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@37dfe3cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@26dc00d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5bbde7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
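[Editor's note] The RpcServer handler threads above (default, priority and replication FPBQ.Fifo handlers) are all parked in Semaphore.acquire() inside FastPathRpcHandler.getCallRunner, i.e. idle handlers waiting to be handed a call. A minimal sketch of that handoff pattern follows; it uses invented names and is only an illustration of why these threads report WAITING on a Semaphore$NonfairSync, not the actual HBase implementation.

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.atomic.AtomicReference;

    // Illustrative only: an idle handler parks on a private semaphore with zero permits
    // until a dispatcher loads a task and releases it.
    final class ParkedHandler extends Thread {
        private final Semaphore loadedTask = new Semaphore(0);           // 0 permits: acquire() parks
        private final AtomicReference<Runnable> task = new AtomicReference<>();

        // Called from a dispatcher thread; returns false if the handler is already busy.
        boolean loadTask(Runnable r) {
            if (task.compareAndSet(null, r)) {
                loadedTask.release();                                     // wakes the parked handler
                return true;
            }
            return false;
        }

        @Override
        public void run() {
            while (!isInterrupted()) {
                try {
                    loadedTask.acquire();                                 // appears as WAITING on Semaphore$NonfairSync
                } catch (InterruptedException e) {
                    return;
                }
                Runnable r = task.getAndSet(null);
                if (r != null) {
                    r.run();
                }
            }
        }
    }
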
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@d798786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45f0fc59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 19 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;54d0784d1943:34357): State: TIMED_WAITING Blocked count: 11 Waited count: 3195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1079/0x00007ff6ecf6f948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2356ff0d): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4980 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 47 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 390 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 79 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@340edf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49756 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 36 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 446 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@34147c36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35f5ddb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77c26124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e97dc4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 499 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (region-location-0): State: WAITING Blocked count: 12 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49573 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (region-location-1): State: WAITING Blocked count: 8 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 58 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@563cd59a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 
(ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 545 Waiting on java.util.concurrent.ForkJoinPool@22f69a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1494 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40dc79e9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2530 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2531 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 414 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 4346 (java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56680922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4771 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4773 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8941 (AsyncFSWAL-1-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581ae352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8953 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-13T20:11:08,856 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:11:38,856 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;54d0784d1943:34357 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 60 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@295fd76d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 
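[Editor's note] The FsDatasetAsyncDiskServiceFixer DEBUG lines above report that a reflective lookup of a threadGroup field fails with NoSuchFieldException on newer Hadoop versions (see HBASE-27595), and the fixer simply skips its work. As a rough illustration only, assuming nothing about the real fixer code, a reflective field probe that degrades gracefully when the field is missing could look like this:

    import java.lang.reflect.Field;

    // Hypothetical example, not HBase code: read a private field by name and fall back
    // quietly when a newer library version no longer declares it.
    public class FieldProbe {
        static final class Legacy { private final ThreadGroup threadGroup = Thread.currentThread().getThreadGroup(); }
        static final class Modern { private final String pool = "async-disk"; }   // field was removed upstream

        static Object readPrivateField(Object target, String name) {
            try {
                Field f = target.getClass().getDeclaredField(name);   // throws NoSuchFieldException if absent
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException e) {
                // Field renamed or removed in a newer version; log and skip instead of failing.
                System.err.println("NoSuchFieldException: " + name + "; skipping fixer");
                return null;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }

        public static void main(String[] args) {
            System.out.println(readPrivateField(new Legacy(), "threadGroup")); // prints the ThreadGroup
            System.out.println(readPrivateField(new Modern(), "threadGroup")); // null, message on stderr
        }
    }
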
26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c48bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@70de85b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) 
java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 9565 Waited count: 10091 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2b548afb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@62a08c9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 1130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@775a1916-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3191 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47d31291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43181): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 190 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55344 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ae1dccf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43181): State: TIMED_WAITING Blocked count: 45 Waited count: 2445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43181): State: TIMED_WAITING Blocked count: 
53 Waited count: 2436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43181): State: TIMED_WAITING Blocked count: 51 Waited count: 2430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43181): State: TIMED_WAITING Blocked count: 46 Waited count: 2446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: 
TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1545047682)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp716438074-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp716438074-87-acceptor-0@66e3e3ed-ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp716438074-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp716438074-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2043cf27-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5b2ac2cc): State: TIMED_WAITING Blocked count: 0 Waited count: 1127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 334 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@650a7060 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1366 Waited count: 1556 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@62b9b04d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 566 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1330250818-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1330250818-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1330250818-122-acceptor-0@4cf31fea-ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1330250818-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-1b631abe-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins): State: TIMED_WAITING Blocked count: 1571 Waited count: 1572 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 0 Waited count: 2099 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a54831e): State: TIMED_WAITING Blocked count: 0 Waited count: 1126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 41717): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 332 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f625b80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1360 Waited count: 1569 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2585690e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1441691316-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1441691316-158-acceptor-0@45e4f4d7-ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:39501}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1441691316-159): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1441691316-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-6317a7da-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1)): State: 
TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@570ff40a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (java.util.concurrent.ThreadPoolExecutor$Worker@1e650b0b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@69e47042): State: TIMED_WAITING Blocked count: 0 Waited count: 1125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35411): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 1 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbce19f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1358 Waited count: 1562 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2d846603): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4c281f18[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62658): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 380 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bcd9ce8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:62658):): State: WAITING Blocked count: 0 Waited count: 468 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2135e78f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@560eef75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3555c440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 3 Waited count: 357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:62658)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0661a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@793cd5a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 85 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62d07906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 201 Waited count: 744 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b8c0422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 16 Waited count: 159 Waiting on java.util.concurrent.Semaphore$NonfairSync@62ae6780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357): State: WAITING Blocked count: 100 Waited count: 7833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d20178d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@37dfe3cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@26dc00d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5bbde7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@d798786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45f0fc59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 19 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;54d0784d1943:34357): State: TIMED_WAITING Blocked count: 11 Waited count: 3195 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1079/0x00007ff6ecf6f948.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2356ff0d): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5580 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 47 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 390 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 79 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@340edf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55758 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 36 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 446 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@34147c36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35f5ddb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77c26124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e97dc4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 499 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (region-location-0): State: WAITING Blocked count: 12 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55575 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (region-location-1): State: WAITING Blocked count: 8 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 58 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@563cd59a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 
(ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1494 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40dc79e9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2530 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2531 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4346 (java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56680922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4771 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4773 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8941 (AsyncFSWAL-1-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581ae352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8953 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-13T20:12:08,856 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T20:12:38,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T20:12:41,404 DEBUG [master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-11-13T20:12:41,406 DEBUG [master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-13T20:12:49,717 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-13T20:13:00,859 DEBUG [M:0;54d0784d1943:34357 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731528480836Disabling compacts and flushes for region at 1731528480836Disabling writes for close at 1731528480857 (+21 ms)Obtaining lock to block concurrent updates at 1731528480858 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731528480858Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=848579, getHeapSize=1017104, getOffHeapSize=0, getCellsCount=2221 at 1731528480858Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731528780859 (+300001 ms)
2024-11-13T20:13:00,859 WARN [M:0;54d0784d1943:34357 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3823, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3823, WAL system stuck?
	at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?]
	... 19 more
2024-11-13T20:13:00,861 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T20:13:00,862 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-13T20:13:00,862 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-13T20:13:00,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976
2024-11-13T20:13:00,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976 after 0ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T20:13:00,865 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T20:13:00,865 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976
2024-11-13T20:13:00,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976 after 0ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;54d0784d1943:34357 225 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 60 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@295fd76d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 16 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c48bea2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6275 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 63 Waiting on java.util.concurrent.CountDownLatch$Sync@46cf17c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 9565 Waited count: 10092 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2b548afb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@62a08c9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@696e100d): State: TIMED_WAITING Blocked count: 0 Waited count: 1250 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp274140085-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp274140085-38): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp274140085-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp274140085-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp274140085-41-acceptor-0@775a1916-ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:37069}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp274140085-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp274140085-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp274140085-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-1e972b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 25 Waited count: 3191 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47d31291 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43181): State: TIMED_WAITING Blocked count: 1 Waited 
count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@27d6be6a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@54873d29): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 210 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 61306 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 3 Waited count: 1524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ae1dccf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43181): State: TIMED_WAITING Blocked count: 45 Waited count: 2506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2496 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43181): State: TIMED_WAITING Blocked count: 53 Waited count: 2498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43181): State: TIMED_WAITING Blocked count: 51 Waited count: 2491 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43181): State: TIMED_WAITING Blocked count: 46 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@f16ac0b): State: TIMED_WAITING Blocked count: 0 Waited count: 312 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@44c4ec06): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6f1ce4b8): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@7db6dfd0): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1545047682)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp716438074-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp716438074-87-acceptor-0@66e3e3ed-ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:39707}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp716438074-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp716438074-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2043cf27-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5b2ac2cc): State: TIMED_WAITING Blocked count: 0 Waited count: 1247 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40237): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 354 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@650a7060 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1386 Waited count: 1596 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@62b9b04d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40237): State: TIMED_WAITING Blocked count: 0 Waited count: 626 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1330250818-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1330250818-121): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1330250818-122-acceptor-0@4cf31fea-ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:40503}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1330250818-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-1b631abe-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (IPC Client (346890841) connection to localhost/127.0.0.1:43181 from jenkins): State: TIMED_WAITING Blocked count: 1631 Waited count: 1632 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 0 Waited count: 2159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a54831e): State: TIMED_WAITING Blocked count: 0 Waited count: 1246 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 41717): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 2 Waited count: 352 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f625b80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1380 Waited count: 1609 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2585690e): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41717): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp1441691316-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff6ec42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp1441691316-158-acceptor-0@45e4f4d7-ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:39501}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp1441691316-159): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp1441691316-160): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-6317a7da-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4)): State: TIMED_WAITING Blocked count: 9 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2)): State: TIMED_WAITING 
Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4bf9e774 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@14f62109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@570ff40a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 
(java.util.concurrent.ThreadPoolExecutor$Worker@1e650b0b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@69e47042): State: TIMED_WAITING Blocked count: 0 Waited count: 1245 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35411): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 1 Waited count: 359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbce19f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181): State: TIMED_WAITING Blocked count: 1378 Waited count: 1602 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2d846603): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35411): State: TIMED_WAITING Blocked count: 0 Waited count: 623 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@124cb00c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 
(java.util.concurrent.ThreadPoolExecutor$Worker@4c281f18[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62658): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 312 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 8 Waited count: 385 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bcd9ce8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:62658):): State: WAITING Blocked count: 0 Waited count: 473 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2135e78f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 505 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@560eef75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3555c440 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 3 Waited count: 388 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:62658)): State: RUNNABLE Blocked count: 15 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0661a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 69 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@793cd5a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22e82629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62d07906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 201 Waited count: 744 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b8c0422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 16 Waited count: 159 Waiting on java.util.concurrent.Semaphore$NonfairSync@62ae6780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34357): State: WAITING Blocked count: 100 Waited count: 7833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d20178d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1cd95966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@37dfe3cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@26dc00d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5bbde7fe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34357): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@d798786 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@45f0fc59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 19 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;54d0784d1943:34357): State: TIMED_WAITING Blocked count: 11 Waited count: 3196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1329/0x00007ff6ed1cdac0.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/54d0784d1943:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@2356ff0d): State: TIMED_WAITING Blocked count: 0 Waited count: 207 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6179 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 47 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 390 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 79 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@340edf70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61759 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 422 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 36 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 13 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 446 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@34147c36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 466 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35f5ddb5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77c26124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/54d0784d1943:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e97dc4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 499 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 508 (region-location-0): State: WAITING Blocked count: 12 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61577 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (region-location-1): State: WAITING Blocked count: 8 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (region-location-2): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1037 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1082 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 58 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@563cd59a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1494 
(Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@40dc79e9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2530 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2531 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39f5f064 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4346 (java.util.concurrent.ThreadPoolExecutor$Worker@297a5ac6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56680922 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4771 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4772 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4773 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8941 (AsyncFSWAL-1-hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData-prefix:54d0784d1943,34357,1731528160092): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@581ae352 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8954 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8956 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8958 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8959 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1319/0x00007ff6ed1c3110.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-13T20:13:04,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T20:13:05,861 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-13T20:13:05,861 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T20:13:05,861 INFO [M:0;54d0784d1943:34357 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T20:13:05,862 INFO [M:0;54d0784d1943:34357 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34357 2024-11-13T20:13:05,862 INFO [M:0;54d0784d1943:34357 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T20:13:05,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43181/user/jenkins/test-data/0f172a90-8f75-93f9-5bda-9c3cb2b27248/MasterData/WALs/54d0784d1943,34357,1731528160092/54d0784d1943%2C34357%2C1731528160092.1731528161976 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-11-13T20:13:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:13:05,964 INFO [M:0;54d0784d1943:34357 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T20:13:05,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34357-0x100edb1e38f0000, quorum=127.0.0.1:62658, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T20:13:05,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de6c5d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:13:05,969 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f3eae39{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:13:05,969 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:13:05,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e4553a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T20:13:05,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76212b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:13:05,971 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
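The ERROR above reports that the async WAL writer did not finish closing within the 5-second default and names the setting "hbase.wal.async.wait.on.shutdown.seconds", while the surrounding "java.io.IOException: Filesystem closed" warnings show the WAL lease-recovery retries racing a DFS client that has already been shut down. The following is a minimal, hedged sketch (not taken from this log) of how that wait could be raised in a test's configuration before the mini cluster starts; the class name, the variable names, and the value 15 are illustrative assumptions, only the property name comes verbatim from the ERROR message.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (picks up hbase-site.xml if present).
    Configuration conf = HBaseConfiguration.create();

    // Property name copied from the ERROR logged by AbstractFSWAL above.
    // The log states the default wait is 5 seconds; 15 is an illustrative value only.
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 15);

    System.out.println("WAL shutdown wait = "
        + conf.get("hbase.wal.async.wait.on.shutdown.seconds") + "s");
  }
}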
2024-11-13T20:13:05,971 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T20:13:05,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T20:13:05,971 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-630360117-172.17.0.2-1731528155198 (Datanode Uuid 78bac615-6573-4f37-853d-8a4bd794b839) service to localhost/127.0.0.1:43181 2024-11-13T20:13:05,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data5/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,973 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data6/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,973 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T20:13:05,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b7103b4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:13:05,975 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6fd9f7d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:13:05,976 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:13:05,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22fc93bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T20:13:05,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c01182{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:13:05,977 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T20:13:05,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T20:13:05,977 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-630360117-172.17.0.2-1731528155198 (Datanode Uuid 11dcf2d9-05d6-42c6-8e49-507f8540cb52) service to localhost/127.0.0.1:43181 2024-11-13T20:13:05,977 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T20:13:05,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data3/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data4/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,978 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T20:13:05,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3de744c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T20:13:05,981 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3937fd27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:13:05,981 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:13:05,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@148bf833{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T20:13:05,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69e7add{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:13:05,983 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T20:13:05,983 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T20:13:05,983 WARN [BP-630360117-172.17.0.2-1731528155198 heartbeating to localhost/127.0.0.1:43181 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-630360117-172.17.0.2-1731528155198 (Datanode Uuid 28be5666-4b1b-41de-bf86-7a826041cf21) service to localhost/127.0.0.1:43181 2024-11-13T20:13:05,983 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T20:13:05,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data1/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/cluster_daabecd0-075e-1b1b-41fe-9a9dedf6884c/data/data2/current/BP-630360117-172.17.0.2-1731528155198 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T20:13:05,984 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T20:13:05,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d3f6b4f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T20:13:05,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58f01c16{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T20:13:05,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T20:13:05,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cebb95a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T20:13:05,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5140b357{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/ecd4db24-a1ed-f6a8-a1d1-89f1aa2d3584/hadoop.log.dir/,STOPPED} 2024-11-13T20:13:06,005 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T20:13:06,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
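The thread dump earlier in this section lists, for each thread, the fields "State", "Blocked count", "Waited count", "Waiting on", and "Stack". As a hedged illustration of where those fields come from, the sketch below produces a dump with the same fields using only the standard java.lang.management API; it is not the HBase utility that emitted the dump above, and the class name is an assumption for this example.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDumpSketch {
  public static void main(String[] args) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    // Request locked monitors/synchronizers so "Waiting on ..." information is populated.
    for (ThreadInfo info : mx.dumpAllThreads(true, true)) {
      System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
      System.out.println("  State: " + info.getThreadState());
      System.out.println("  Blocked count: " + info.getBlockedCount());
      System.out.println("  Waited count: " + info.getWaitedCount());
      if (info.getLockName() != null) {
        System.out.println("  Waiting on " + info.getLockName());
      }
      System.out.println("  Stack:");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}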